diff --git "a/2023/UniHCP_ A Unified Model for Human-Centric Perceptions/layout.json" "b/2023/UniHCP_ A Unified Model for Human-Centric Perceptions/layout.json" new file mode 100644--- /dev/null +++ "b/2023/UniHCP_ A Unified Model for Human-Centric Perceptions/layout.json" @@ -0,0 +1,12339 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 115, + 103, + 479, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 103, + 479, + 121 + ], + "spans": [ + { + "bbox": [ + 115, + 103, + 479, + 121 + ], + "type": "text", + "content": "UniHCP: A Unified Model for Human-Centric Perceptions" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "spans": [ + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": "Yuanzheng Ci" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": ", Yizhou Wang" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + "content": "^{2,3*}" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": ", Meilin Chen" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": ", Shixiang Tang" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": ", Lei Bai" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + "content": "^{3\\dagger}" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": ", Feng Zhu" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + 
"content": "^{4}" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": ", Rui Zhao" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + "content": "^{4,5}" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": ", Fengwei Yu" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": ", Donglian Qi" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "text", + "content": ", Wanli Ouyang" + }, + { + "bbox": [ + 59, + 142, + 533, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 171, + 533, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 171, + 533, + 200 + ], + "spans": [ + { + "bbox": [ + 59, + 171, + 533, + 200 + ], + "type": "text", + "content": "1The University of Sydney, 2Zhejiang University, 3Shanghai AI Laboratory, 4SenseTime Research, 5Qing Yuan Research Institute, Shanghai Jiao Tong University, Shanghai, China" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 103, + 201, + 484, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 201, + 484, + 213 + ], + "spans": [ + { + "bbox": [ + 103, + 201, + 484, + 213 + ], + "type": "text", + "content": "yuanzheng.ci@sydney.edu.au, yizhouwang@zju.edu.cn, bailei@pjlab.org.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 241, + 192, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 241, + 192, + 253 + ], + "spans": [ + { + "bbox": [ + 143, + 241, + 192, + 253 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 266, + 290, + 555 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 46, + 266, + 290, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 290, + 555 + ], + "type": "text", + "content": "Human-centric perceptions (e.g., pose estimation, human parsing, pedestrian detection, person re-identification, etc.) play a key role in industrial applications of visual models. While specific human-centric tasks have their own relevant semantic aspect to focus on, they also share the same underlying semantic structure of the human body. However, few works have attempted to exploit such homogeneity and design a general-propose model for human-centric tasks. In this work, we revisit a broad range of human-centric tasks and unify them in a minimalist manner. We propose UniHCP, a Unified Model for Human-Centric Perceptions, which unifies a wide range of human-centric tasks in a simplified end-to-end manner with the plain vision transformer architecture. With large-scale joint training on 33 human-centric datasets, UniHCP can outperform strong baselines on several in-domain and downstream tasks by direct evaluation. When adapted to a specific task, UniHCP achieves new SOTAs on a wide range of human-centric tasks, e.g., 69.8 mIoU on CIHP for human parsing, 86.18 mA on PA100K for attribute prediction, 90.3 mAP on Market1501 for ReID, and 85.8 JI on CrowdHuman for pedestrian detection, performing better than specialized models tailored for each task. The code and pretrained model are available at https://github.com/OpenGVLab/UniHCP." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 578, + 128, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 578, + 128, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 578, + 128, + 590 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 599, + 287, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 599, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 46, + 599, + 287, + 684 + ], + "type": "text", + "content": "Research on human-centric perceptions has come a long way with tremendous advancements in recent years. Many methods have been developed to enhance the performance of pose estimation [9, 25, 60, 91], pedestrian detection [4, 62, 63, 76], person re-identification [42, 86, 101] (ReID), and many other human-centered tasks. These significant progress play a key role in advancing the applications of vi" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 239, + 545, + 392 + ], + "blocks": [ + { + "bbox": [ + 310, + 239, + 545, + 392 + ], + "lines": [ + { + "bbox": [ + 310, + 239, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 310, + 239, + 545, + 392 + ], + "type": "image", + "image_path": "3fab8d845b8f674e9d2a7a5d35aaf6f8bd489759a0b3796aa02fcfffc21487e6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 391, + 545, + 412 + ], + "lines": [ + { + "bbox": [ + 306, + 391, + 545, + 412 + ], + "spans": [ + { + "bbox": [ + 306, + 391, + 545, + 412 + ], + "type": "text", + "content": "Figure 1. UniHCP unifies 5 human-centric tasks under one model and is trained on a massive collection of human-centric datasets." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 436, + 545, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 436, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 305, + 436, + 545, + 460 + ], + "type": "text", + "content": "sual models in numerous fields, such as sports analysis [11], autonomous driving [97], and electronic retailing [27]." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 461, + 546, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 461, + 546, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 461, + 546, + 700 + ], + "type": "text", + "content": "Although different human-centric perception tasks have their own relevant semantic information to focus on, those semantics all rely on the same basic structure of the human body and the attributes of each body part [64, 81]. In light of this, there have been some attempts trying to exploit such homogeneity and train a shared neural network jointly with distinct human-centric tasks [28,29,46,48,61,71,77,87,98]. For instance, human parsing has been trained in conjunction with human keypoint detection [46, 61, 98], pedestrian attribute recognition [87], pedestrian detection [48] or person re-identification [28]. The experimental results of these works empirically validate that some human-centric tasks may benefit each other when trained together. Motivated by these works, a natural expectation is that a more versatile all-in-one model could be a feasible solution for general human-centric perceptions, which can utilize the homogeneity of human-centric tasks for improving performance, enable fast adaption to new tasks, and decrease the burden of memory cost in large-scale multitask system deployment compared with specific models to specific tasks." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "content": "However, unifying distinct human-centric tasks into a" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 693, + 127, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 693, + 127, + 703 + ], + "spans": [ + { + "bbox": [ + 58, + 693, + 127, + 703 + ], + "type": "text", + "content": "*Equal contribution." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 59, + 703, + 135, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 135, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 135, + 712 + ], + "type": "text", + "content": "† Corresponding author." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17840" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 216 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 216 + ], + "type": "text", + "content": "general model is challenging considering the data diversity and output structures. From the data's perspective, images in different human-centric tasks and different datasets have different resolutions and characteristics (e.g., day and night, indoor and outdoor), which calls for a robust representative network with the capability to accommodate them. From the perspective of output, the annotations and expected outputs of different human-centric tasks have distinct structures and granularities. Although this challenge can be bypassed via deploying separate output heads for each task/dataset, it is not scalable when the number of tasks and datasets is large." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 218, + 289, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 218, + 289, + 494 + ], + "spans": [ + { + "bbox": [ + 46, + 218, + 289, + 494 + ], + "type": "text", + "content": "In this work, we aim to explore a simple, scalable formulation for unified human-centric system and, for the first time, propose a Unified model for Human-Centric Perceptions (UniHCP). As shown in Figure.1, UniHCP unifies and simultaneously handles five distinct human-centric tasks, namely, pose estimation, semantic part segmentation, pedestrian detection, ReID, and person attribute recognition. 
Motivated by the extraordinary capacity and flexibility of the vision transformers [43, 94], a simple yet unified encoder-decoder architecture with the plain vision transformer is employed to handle the input diversity, which works in a simple feedforward and end-to-end manner, and can be shared across all human-centric tasks and datasets to extract general human-centric knowledge. To generate the output for different tasks with the unified model, UniHCP defines Task-specific Queries, which are shared among all datasets with the same task definition and interpreted into different output units through a Task-guided Interpreter shared across different datasets and tasks. With task-specific queries and the versatile interpreter, UniHCP avoids the widely used task-specific output heads, which minimizes task-specific parameters for knowledge sharing and make backbone-encoded features reusable across tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 496, + 289, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 496, + 289, + 628 + ], + "spans": [ + { + "bbox": [ + 46, + 496, + 289, + 628 + ], + "type": "text", + "content": "Own to these designs, UniHCP is suitable and easy to perform multitask pretraining at scale. To this end, we pretrained an UniHCP model on a massive collection of 33 labeled human-centric datasets. By harnessing the abundant supervision signals of each task, we show such a model can simultaneously handle these in-pretrain tasks well with competitive performance compared to strong baselines relying on specialized architectures. When adapted to a specific task, both in-domain and downstream, our model achieves new SOTAs on several human-centric task benchmarks. 
In summary, the proposed model has the following properties:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 630, + 286, + 713 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 57, + 630, + 286, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 630, + 286, + 653 + ], + "spans": [ + { + "bbox": [ + 57, + 630, + 286, + 653 + ], + "type": "text", + "content": "- Unifying five distinct human-centric tasks and handling them simultaneously." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 654, + 286, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 654, + 286, + 677 + ], + "spans": [ + { + "bbox": [ + 58, + 654, + 286, + 677 + ], + "type": "text", + "content": "- Shared encoder-decoder network based on plain transformer." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 678, + 277, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 678, + 277, + 689 + ], + "spans": [ + { + "bbox": [ + 58, + 678, + 277, + 689 + ], + "type": "text", + "content": "- Simple task-specific queries identifying the outputs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 689, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 689, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 689, + 286, + 713 + ], + "type": "text", + "content": "Maximum weight sharing (99.97% shared parameters) with a task-guided interpreter." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 317, + 72, + 545, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 545, + 96 + ], + "type": "text", + "content": "- Trainable at scale and demonstrates competitive performance compared to task-specialized models." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 108, + 398, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 108, + 398, + 121 + ], + "spans": [ + { + "bbox": [ + 306, + 108, + 398, + 121 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 128, + 460, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 128, + 460, + 141 + ], + "spans": [ + { + "bbox": [ + 306, + 128, + 460, + 141 + ], + "type": "text", + "content": "2.1. Human-Centric Perceptions" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 147, + 545, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 147, + 545, + 302 + ], + "spans": [ + { + "bbox": [ + 304, + 147, + 545, + 302 + ], + "type": "text", + "content": "Human-centric perceptions are essential for substantial real-world applications. Depending on the targeted visual concept, the way of decoding output from image features varies across tasks. Specifically, pose estimation and pedestrian detection are both localization tasks that can be solved by either regression-based methods [37, 96] or heatmap-based methods [33, 34, 84]. Human parsing, as a fine-grained segmentation problem, is usually solved by per-pixel classification. While contour-based methods [65, 85] can also obtain segmentation masks, it requires instance-level mask annotations, which are not always available. PAR is treated as a multi-label classification task [104], and ReID is treated as a feature learning task [74]." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 303, + 545, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 303, + 545, + 411 + ], + "spans": [ + { + "bbox": [ + 304, + 303, + 545, + 411 + ], + "type": "text", + "content": "Recently, several transformer-based solutions have been proposed for these human-centric tasks, with attention block designs on both backbone [19, 88, 93] and decoding network [40, 44, 54, 59, 87, 102]. However, these methods involve different task-specific designs and thus cannot be integrated into one model seamlessly. Built upon the general success of these works, we take a further step and unify human-centric tasks under the same architecture based on plain vision transformer." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 418, + 400, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 418, + 400, + 430 + ], + "spans": [ + { + "bbox": [ + 306, + 418, + 400, + 430 + ], + "type": "text", + "content": "2.2. Unified Models" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 437, + 545, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 545, + 581 + ], + "type": "text", + "content": "A general-purpose model that can handle different tasks in a unified manner has long been a coveted alternative to models specifically tailored for different tasks. Pioneering works regarding Natural Language Processing (NLP) [66], vision-language [58], and basic vision tasks [30, 68] have shown the effectiveness of such kind of unified cross-task models. ExT5 [2] and OFA [80] further provide a degree of promise for the performance benefits of large-scale multitask co-training. Among models supporting visual tasks, UniHead [45] and UViM [31] propose a unified architecture for several vision tasks. However, they are only trained and evaluated in a single-task manner." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": "For methods supporting multitask co-training, UniPerceiver [106] focuses on tasks in which the desired output is inherently language or labels, which does not fit human-centric tasks. While UniT [21], OFA [80], Unified-IO [57], and Pix2Seq v2 [6] further extend the support for detection, keypoint detection, segmentation, and many other visual tasks, they rely on independent decoder heads [21, 80] or autoregressive modeling [6, 57]. These works do not focus on human-centric vision tasks. Differently, our work introduces a shared decoder head (task-guided interpreter) in a parallelly feedforward manner for human-centric vision" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17841" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 69, + 542, + 255 + ], + "blocks": [ + { + "bbox": [ + 52, + 69, + 542, + 255 + ], + "lines": [ + { + "bbox": [ + 52, + 69, + 542, + 255 + ], + "spans": [ + { + "bbox": [ + 52, + 69, + 542, + 255 + ], + "type": "image", + "image_path": "c28ecb5570196684660c26e04b734560a99de1f3e274c086614322542dc3c062.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 262, + 547, + 285 + ], + "lines": [ + { + "bbox": [ + 46, + 262, + 547, + 285 + ], + "spans": [ + { + "bbox": [ + 46, + 262, + 547, + 285 + ], + "type": "text", + "content": "Figure 2. 
UniHCP handles a massive collection of human-centric tasks uniformly by task-specific queries and a task-guided interpreter, all predictions are yielded in parallel through a simple encoder-decoder transformer architecture." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 62, + 323, + 272, + 414 + ], + "blocks": [ + { + "bbox": [ + 99, + 304, + 235, + 314 + ], + "lines": [ + { + "bbox": [ + 99, + 304, + 235, + 314 + ], + "spans": [ + { + "bbox": [ + 99, + 304, + 235, + 314 + ], + "type": "text", + "content": "Table 1. Network details of UniHCP" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 62, + 323, + 272, + 414 + ], + "lines": [ + { + "bbox": [ + 62, + 323, + 272, + 414 + ], + "spans": [ + { + "bbox": [ + 62, + 323, + 272, + 414 + ], + "type": "table", + "html": "
LayersDimensionParams
Encoder1276891.1M
Decoder925614.5M
Task-guided Interpreter3.5M
Task-specific queries256<0.03M
Total109.1M
Task-agnostic params / total params99.97%
", + "image_path": "c792d17e91a9749b43b6aea1ebd7a1db1218cb86a9aa82857160564a442fd19e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 436, + 287, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 436, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 436, + 287, + 460 + ], + "type": "text", + "content": "tasks, which is simple yet maximizes the parameter sharing among different tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 461, + 287, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 461, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 46, + 461, + 287, + 546 + ], + "type": "text", + "content": "In the case of human-centric tasks, many works have shown great success by co-training a pair of human-centric tasks [28, 29, 46, 48, 61, 71, 77, 87, 98]. However, there is no work exploring a general unified model that can handle all representative human-centric tasks. Our work is the first attempt at designing, training, and evaluating a unified human-centric model with a large-scale multitask setting." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 560, + 107, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 560, + 107, + 572 + ], + "spans": [ + { + "bbox": [ + 47, + 560, + 107, + 572 + ], + "type": "text", + "content": "3. UniHCP" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": "To share the most knowledge among various human-centric tasks, we attempt to maximize weight sharing among all tasks in UniHCP. 
Specifically, our UniHCP, as shown in Figure 2, consists of three components: (1) A task-agnostic transformer encoder " + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": " to extract image features. (2) A transformer decoder " + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": " that attends to task-specific information according to task-specific queries " + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{Q}^t\\}" + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": " denotes a specific task. (3) A task-guided interpreter " + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": " produces output units, in which we decompose the output of multiple human-centric perception tasks into sharable units of diverse granularities, i.e., feature representation, lo" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 305, + 545, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 305, + 545, + 389 + ], + "spans": [ + { + "bbox": [ + 304, + 305, + 545, + 389 + ], + "type": "text", + "content": "cal probability map, global probability, bounding box coordinates. 
Since only the queries to the decoders are not shared among tasks, we can learn human-centric knowledge across different granularities by the designed interpreters and achieve maximum parameter sharing among all tasks, i.e., " + }, + { + "bbox": [ + 304, + 305, + 545, + 389 + ], + "type": "inline_equation", + "content": "99.97\\%" + }, + { + "bbox": [ + 304, + 305, + 545, + 389 + ], + "type": "text", + "content": " shared parameters, as shown in Table 1. The pipeline for our UniHCP is described as follows." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "text", + "content": "Step 1: Given an image " + }, + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "text", + "content": " sampled from the dataset in task " + }, + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "text", + "content": ", extract encoded features " + }, + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "text", + "content": " by the task-agnostic transformer encoder " + }, + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 389, + 545, + 425 + ], + "type": "text", + "content": " (Sec. 3.1)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 425, + 545, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 425, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 425, + 545, + 460 + ], + "type": "text", + "content": "Step 2: A transformer decoder " + }, + { + "bbox": [ + 304, + 425, + 545, + 460 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 425, + 545, + 460 + ], + "type": "text", + "content": " with task-specific queries " + }, + { + "bbox": [ + 304, + 425, + 545, + 460 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}^t" + }, + { + "bbox": [ + 304, + 425, + 545, + 460 + ], + "type": "text", + "content": " extracts task-specific features from encoded features " + }, + { + "bbox": [ + 304, + 425, + 545, + 460 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 304, + 425, + 545, + 460 + ], + "type": "text", + "content": " (Sec. 3.2)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "text", + "content": "Step 3: Generate output units according to the queried task, i.e., attended features " + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_f" + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "text", + "content": ", local probability map " + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_m" + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "text", + "content": ", global probability " + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_p" + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "text", + "content": " and bounding box coordinates 
" + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_{bbox}" + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "text", + "content": " by a task-guided interpreter " + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "text", + "content": " (Sec. 3.3). For example, for human parsing, two units: local probability map " + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_m" + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "text", + "content": " (for semantic part segmentation) and global probability " + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_p" + }, + { + "bbox": [ + 304, + 461, + 545, + 544 + ], + "type": "text", + "content": " (for existence of body part in the image), are generated." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "text", + "content": "Step 4: Calculate the loss of the corresponding task for optimizing the encoder " + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "text", + "content": ", the decoder " + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "text", + "content": ", the task-specific queries " + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}^t" + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "text", + "content": " and task-guided interpreter " + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 304, + 545, + 545, + 594 + ], + "type": "text", + "content": " by backward propagation (Sec. 3.4)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 601, + 499, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 601, + 499, + 613 + ], + "spans": [ + { + "bbox": [ + 305, + 601, + 499, + 613 + ], + "type": "text", + "content": "3.1. Task-agnostic Transformer Encoder" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 619, + 545, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 619, + 545, + 691 + ], + "spans": [ + { + "bbox": [ + 304, + 619, + 545, + 691 + ], + "type": "text", + "content": "UniHCP uses a plain Vision Trasnformer [14] (ViT) as the encoder. 
To handle input images of different resolutions, we use a shared learnable positional embedding with the size of " + }, + { + "bbox": [ + 304, + 619, + 545, + 691 + ], + "type": "inline_equation", + "content": "84 \\times 84" + }, + { + "bbox": [ + 304, + 619, + 545, + 691 + ], + "type": "text", + "content": " and interpolate it based on the spatial size of the input image after patch projection. The encoded feature " + }, + { + "bbox": [ + 304, + 619, + 545, + 691 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 304, + 619, + 545, + 691 + ], + "type": "text", + "content": " can be mathematically calculated as" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 391, + 701, + 545, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 391, + 701, + 545, + 714 + ], + "type": "interline_equation", + "content": "\\mathbf {F} = E (\\mathbf {X}, \\mathbf {P} _ {E}), \\tag {1}", + "image_path": "c96b8bebc8b59dc77f87a1abb004e78381db711ea7358b0eb320261cee6b61f4.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "17842" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_E" + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": " is the positional embedding after 
interpolation and " + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": " denotes the task-agnostic transformer encoder." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 102, + 234, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 102, + 234, + 117 + ], + "spans": [ + { + "bbox": [ + 47, + 102, + 234, + 117 + ], + "type": "text", + "content": "3.2. Decoder with Task-specific Queries" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 121, + 287, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 121, + 287, + 168 + ], + "spans": [ + { + "bbox": [ + 47, + 121, + 287, + 168 + ], + "type": "text", + "content": "To obtain the most discriminative feature for each task while maximizing knowledge sharing, we design task-specific queries to guide the transformer decoder only attending to task-relevant information." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 169, + 287, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 169, + 287, + 190 + ], + "spans": [ + { + "bbox": [ + 47, + 169, + 287, + 190 + ], + "type": "text", + "content": "Task-specific Queries. 
Task queries for task " + }, + { + "bbox": [ + 47, + 169, + 287, + 190 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 169, + 287, + 190 + ], + "type": "text", + "content": " are denoted as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 192, + 287, + 206 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 192, + 287, + 206 + ], + "spans": [ + { + "bbox": [ + 118, + 192, + 287, + 206 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} ^ {t} = \\left[ \\mathbf {q} _ {1} ^ {t}, \\mathbf {q} _ {2} ^ {t}, \\dots , \\mathbf {q} _ {N ^ {t}} ^ {t} \\right], \\tag {2}", + "image_path": "2943bc73917e284f6e7b6ada446c814367abb81d7e25b5743294c6a112caac0c.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "inline_equation", + "content": "N^t" + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "text", + "content": " denotes the number of queries representing " + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "inline_equation", + "content": "N^t" + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "text", + "content": " different semantic meanings in task " + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "text", + "content": ". For pedestrian attribute recognition, pose estimation, human parsing, and ReID, the number of queries respectively equals to the number of attributes, the number of pose joints, the number of semantic parsing classes, and the length of desired ReID features. 
For pedestrian detection, we follow the implementation in [82], with details provided in the supplementary material. We randomly initialize the task-specific query " + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}^t" + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "text", + "content": " as learnable embeddings " + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_0^t" + }, + { + "bbox": [ + 46, + 209, + 287, + 339 + ], + "type": "text", + "content": " and refine it with the following decoder blocks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "spans": [ + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "text", + "content": "Following the common practice as in [8, 78, 82], all " + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}^t" + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "text", + "content": " are also associated with a positional embedding " + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_p^t" + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "text", + "content": ", which has the same dimension as " + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}^t" + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "text", + "content": " and is not shared across tasks. 
Different from " + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}^t" + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "text", + "content": " that will be progressively refined in the decoder blocks, " + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_p^t" + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "text", + "content": " is shared across decoder blocks. For tasks other than pedestrian detection, " + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_p^t" + }, + { + "bbox": [ + 46, + 341, + 287, + 436 + ], + "type": "text", + "content": " is simply a learnable positional embedding that is randomly initialized. For pedestrian detection, we have" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 129, + 443, + 287, + 458 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 443, + 287, + 458 + ], + "spans": [ + { + "bbox": [ + 129, + 443, + 287, + 458 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} _ {p} ^ {t} = \\operatorname {p r o j} (\\mathcal {A} _ {\\mathbf {Q}}), \\tag {3}", + "image_path": "a1530bab8a9ac56863805ea98d72dadf5ed92351cf0113eb0652defd0f19bd37.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 465, + 287, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 465, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 46, + 465, + 287, + 525 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 465, + 287, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathbf{Q}} \\in \\mathbb{R}^{N^t \\times 2}" + }, + { + "bbox": [ + 46, + 465, + 287, + 525 + ], + "type": "text", + "content": " refers to " + }, + { + "bbox": [ + 46, + 465, + 287, + 525 + ], + "type": "inline_equation", + "content": "N^t" + }, + { + "bbox": [ + 46, + 465, + 
287, + 525 + ], + "type": "text", + "content": " learnable anchor points that are initialized with a uniform distribution following [82], and proj is a projection from coordinates to positional embeddings (more details about the projector are elaborated in the supplementary materials)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "text", + "content": "Decoder. The transformer decoder aims to attend to task-specific features according to the task queries. We follow the standard design of transformer decoders [78]. In the decoder, each transformer block " + }, + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "inline_equation", + "content": "D_{l}" + }, + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "inline_equation", + "content": "l = 1,2,\\dots,L" + }, + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "text", + "content": " consists of a cross-attention module, a self-attention module, and a feed-forward module (FFN), where " + }, + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "text", + "content": " denotes the number of transformer blocks. We place cross-attention before self-attention as adopted by [8, 36]. 
For each block " + }, + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "inline_equation", + "content": "D_{l}" + }, + { + "bbox": [ + 46, + 526, + 287, + 645 + ], + "type": "text", + "content": ", we attend to task-specific information from the encoded feature by task queries, which can be formulated as" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 652, + 287, + 667 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 652, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 107, + 652, + 287, + 667 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} _ {l} ^ {t} = D _ {l} \\left(\\mathbf {Q} _ {l - 1} ^ {t}, \\mathbf {Q} _ {p} ^ {t}, \\mathbf {F}, \\mathbf {F} _ {p}\\right), \\tag {4}", + "image_path": "a96d1ca3c9e7900e6ecf4bd292c6f97d7087ced544609464ab93136a4a317363.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 117, + 673, + 287, + 686 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 673, + 287, + 686 + ], + "spans": [ + { + "bbox": [ + 117, + 673, + 287, + 686 + ], + "type": "interline_equation", + "content": "\\text {w h e r e} \\mathbf {F} _ {p} = \\operatorname {p r o j} \\left(\\mathcal {A} _ {\\mathbf {F}}\\right), \\tag {5}", + "image_path": "0554e6946e1f914e2a9442178973143ee2d66734b17e7bfc85104ae465fa3fa9.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathbf{F}} \\in \\mathbb{R}^{H_{\\mathbf{F}}W_{\\mathbf{F}} \\times 2}" + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": " is the coordinates with respect to the original image for all feature tokens in " + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{F} 
\\in R^{H_{\\mathbf{F}} \\times W_{\\mathbf{F}}}" + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": ". For" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 326, + 72, + 527, + 211 + ], + "blocks": [ + { + "bbox": [ + 326, + 72, + 527, + 211 + ], + "lines": [ + { + "bbox": [ + 326, + 72, + 527, + 211 + ], + "spans": [ + { + "bbox": [ + 326, + 72, + 527, + 211 + ], + "type": "image", + "image_path": "54b6ebfc20063bdd2d7f04640950468549fc36c10631988d0202330b8e89a5fc.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "lines": [ + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "spans": [ + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "text", + "content": "Figure 3. Task-guided interpreter. " + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "inline_equation", + "content": "\\otimes" + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "text", + "content": " denotes a dynamic convolution module [7] that takes the projected query feature as the kernel and takes the tokens " + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "text", + "content": " from the encoder as the feature map, where " + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "text", + "content": " is upscaled to the desired resolution " + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "inline_equation", + "content": "H^{\\prime} \\times W^{\\prime}" + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + 
], + "type": "text", + "content": " denotes addition, for which the inputs are the projected query feature in the format of " + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "inline_equation", + "content": "[\\Delta cx, \\Delta cy, h, w]" + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\mathbf{Q}}" + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "text", + "content": ", which contains the anchor point " + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "inline_equation", + "content": "[cx, cy]" + }, + { + "bbox": [ + 305, + 213, + 545, + 289 + ], + "type": "text", + "content": " (see supplementary materials for details)." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "text", + "content": "the cross-attention in the decoder " + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "D_{l}" + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "text", + "content": ", the query is " + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{Q}}_l^t = \\mathbf{Q}_{l - 1}^t +\\mathbf{Q}_p^t" + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "text", + "content": ", the key is " + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{K}} = \\mathbf{F}' + \\mathbf{F}_p" + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "text", + "content": ", and the value is " + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "inline_equation", 
+ "content": "\\hat{\\mathbf{V}} = \\mathbf{F}'" + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "\\mathbf{F}'" + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "text", + "content": " is linearly projected from the features of the encoder " + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "text", + "content": " to align channel dimensions. The result of cross-attention is then passed for self-attention in " + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "D_{l}" + }, + { + "bbox": [ + 305, + 308, + 545, + 373 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 380, + 441, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 380, + 441, + 393 + ], + "spans": [ + { + "bbox": [ + 306, + 380, + 441, + 393 + ], + "type": "text", + "content": "3.3. Task-guided Interpreter" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 398, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 398, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 305, + 398, + 545, + 434 + ], + "type": "text", + "content": "Task-guided interpreter " + }, + { + "bbox": [ + 305, + 398, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 305, + 398, + 545, + 434 + ], + "type": "text", + "content": " interprets query tokens " + }, + { + "bbox": [ + 305, + 398, + 545, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}^t" + }, + { + "bbox": [ + 305, + 398, + 545, + 434 + ], + "type": "text", + "content": " into four output units subject to the request of a specific task. 
As shown in Figure 3, these four output units are as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 353, + 452, + 493, + 467 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 452, + 493, + 467 + ], + "spans": [ + { + "bbox": [ + 353, + 452, + 493, + 467 + ], + "type": "interline_equation", + "content": "\\text {feature unit}: \\mathbf {Y} _ {f} \\in \\mathbb {R} ^ {N ^ {t} \\times C}", + "image_path": "8032a945235d38383b51958fb0808d10e6be99e3e69af85b9e1316ecb255951a.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 337, + 470, + 545, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 470, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 337, + 470, + 545, + 491 + ], + "type": "interline_equation", + "content": "\\text {global probability unit}: \\mathbf {Y} _ {p} \\in \\mathbb {R} ^ {N ^ {t} \\times 1}", + "image_path": "ffefcd8971e642a86c6af4202eecbb3c4afbb2b1a3a742462a3f10821c8e65b0.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 323, + 487, + 515, + 502 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 487, + 515, + 502 + ], + "spans": [ + { + "bbox": [ + 323, + 487, + 515, + 502 + ], + "type": "interline_equation", + "content": "\\text {local probability map unit}: \\mathbf {Y} _ {m} \\in \\mathbb {R} ^ {N ^ {t} \\times H ^ {\\prime} \\times W ^ {\\prime}}", + "image_path": "129c7167259335326b990c4584827472ba028694ce6faf457a2c5b0056c0a621.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 352, + 506, + 504, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 352, + 506, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 352, + 506, + 504, + 520 + ], + "type": "interline_equation", + "content": "\\text {bounding box unit}: \\mathbf {Y} _ {b b o x} \\in \\mathbb {R} ^ {N ^ {t} \\times 4},", + "image_path": 
"99c1d0e535c5c3638fb098641f6c6384c58f29ed3c492c1f6a6df19d1560f36e.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "spans": [ + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "text", + "content": " is the output dimension of the decoder, " + }, + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "inline_equation", + "content": "H^{\\prime}\\times W^{\\prime}" + }, + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "text", + "content": " denotes the desired resolution for the local probability map. Given task " + }, + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "text", + "content": " and output interpreter " + }, + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 305, + 528, + 545, + 575 + ], + "type": "text", + "content": ", the output of the UniHCP is defined as follows:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 318, + 584, + 545, + 598 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 584, + 545, + 598 + ], + "spans": [ + { + "bbox": [ + 318, + 584, + 545, + 598 + ], + "type": "interline_equation", + "content": "\\left\\{\\mathbf {Y} _ {u} \\mid g _ {u} ^ {\\mathbf {t} _ {t}} = 1, u \\in \\{f, p, m, b b o x \\} \\right\\} = \\mathcal {I} \\left(\\mathbf {Q} ^ {t}, \\mathbf {g} ^ {\\mathbf {t} _ {t}}\\right), \\tag {7}", + "image_path": "847654bc5e300837ed14cf5d4fef8cc2e1e1c01a962b2210591ad57611b6920b.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "spans": [ + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_t \\in \\{\\text{reid}, \\dots, \\text{pose}\\}" + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "text", + "content": " denotes the task type of task " + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "inline_equation", + "content": "\\mathbf{g}^{\\mathbf{t}} = \\{g_u^{\\mathbf{t}}\\}" + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "text", + "content": " is a set of task-specific binary gates (" + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "inline_equation", + "content": "g \\in \\{0,1\\}" + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "text", + "content": ") that represents the desired output units for task type " + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 305, + 605, + 545, + 642 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "type": "text", + "content": "Guidance from tasks to output units. 
For human parsing, local probability map (for semantic part segmentation) and global probability (for existence of body part in the image) are activated, corresponding to " + }, + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "type": "inline_equation", + "content": "g_{m}^{seg} = 1" + }, + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "type": "inline_equation", + "content": "g_{p}^{seg} = 1" + }, + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "type": "text", + "content": " respectively. For person ReID, feature vectors are used, corresponding to " + }, + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "type": "inline_equation", + "content": "g_{f}^{reid} = 1" + }, + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "type": "text", + "content": ". For pose estimation, " + }, + { + "bbox": [ + 305, + 642, + 545, + 715 + ], + "type": "inline_equation", + "content": "g_{m}^{pose} = 1" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17843" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "(for localizing key points) and " + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "inline_equation", + "content": "g_{p}^{pose} = 1" + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": " (for existence of keypoints in the image). 
For detection, " + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "inline_equation", + "content": "g_{bbox}^{det} = 1" + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": " (for bounding box prediction) and " + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "inline_equation", + "content": "g_{p}^{det} = 1" + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": " (for existence of object). For pedestrian attribute prediction, " + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "inline_equation", + "content": "g_{p}^{par} = 1" + }, + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": " (for existence of attributes in the image). Therefore, the output unit of global probabilities is shared among pose estimation, human parsing, pedestrian detection, and attribute recognition. The output unit of local probability maps is shared among pose estimation and human parsing." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 181, + 289, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 181, + 289, + 456 + ], + "spans": [ + { + "bbox": [ + 46, + 181, + 289, + 456 + ], + "type": "text", + "content": "Discussion. The task-guided interpreter interprets each query token independently. Previous works focused on autoregressive decoding with tokenization [6, 57] or task-specific heads [21, 92] to handle different output units required by specific tasks. In contrast, the task-guided interpreter can handle tasks involving a varying number of classes, yield all results in parallel, and do not require task-specific heads. This is achieved by two designs in our UniHCP framework: 1) Class/instance information is self-contained in queries. As mentioned in Section 3.2, a query represents a particular semantic class in pose estimation, attribute prediction, human parsing, and pedestrian detection. 
We only need to retrieve a scalar probability value from a query to obtain the confidence information for a particular class/human instance. 2) Outputs of the same modality share the same output unit. For example, the heatmap for a particular joint in pose estimation and the heatmap for a particular body part in human parsing have the same dimension. Although these outputs have different meanings, experimental results in Section 4.3 show that it is suitable to obtain them through the same output unit and fully let the task-specific queries handle the differences in preferred information to be represented." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 463, + 165, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 463, + 165, + 475 + ], + "spans": [ + { + "bbox": [ + 47, + 463, + 165, + 475 + ], + "type": "text", + "content": "3.4. Objective Functions" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 481, + 287, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 481, + 287, + 540 + ], + "spans": [ + { + "bbox": [ + 46, + 481, + 287, + 540 + ], + "type": "text", + "content": "In this section, we will introduce the objective functions for training diverse human-centric tasks together and illustrate how these objectives are related to the output units defined in Eq. 6. Unless otherwise specified, we omit the GT inputs in loss functions for brevity." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "text", + "content": "Overall Objective Function. 
Given a collection of datasets " + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "inline_equation", + "content": "\\mathbb{D} = \\{\\mathcal{D}|\\mathbf{t}_{\\mathcal{D}}\\in \\{\\text{reid},\\ldots ,\\text{pose}\\} \\}" + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_{\\mathcal{D}}" + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "text", + "content": " denotes the task type of dataset " + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "text", + "content": ", we also note " + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "inline_equation", + "content": "t_{\\mathcal{D}}" + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "text", + "content": " as the task of dataset " + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 46, + 541, + 287, + 588 + ], + "type": "text", + "content": ", we have the overall loss defined as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 98, + 594, + 287, + 620 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 594, + 287, + 620 + ], + "spans": [ + { + "bbox": [ + 98, + 594, + 287, + 620 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\sum_ {\\mathcal {D} \\in \\mathbb {D}} w _ {D} \\mathcal {L} _ {\\mathbf {t} _ {\\mathcal {D}}} \\left(\\mathcal {I} \\left(\\mathbf {Q} ^ {t _ {\\mathcal {D}}}, \\mathbf {g} ^ {\\mathbf {t} _ {\\mathcal {D}}}\\right)\\right), \\tag {8}", + "image_path": "1e5b4ec8a44982100b1dd6dc7c25cbc68eca9cb32bb77df067c87f8844a5e942.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 627, + 287, + 664 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 46, + 627, + 287, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 627, + 287, + 664 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 627, + 287, + 664 + ], + "type": "inline_equation", + "content": "w_{\\mathcal{D}}" + }, + { + "bbox": [ + 46, + 627, + 287, + 664 + ], + "type": "text", + "content": " is the loss weight for dataset " + }, + { + "bbox": [ + 46, + 627, + 287, + 664 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 46, + 627, + 287, + 664 + ], + "type": "text", + "content": ", which is calculated based on the task type and batch size (calculations are elaborated in supplementary materials)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "ReID. Person ReID is a feature learning task for extracting identification information. Therefore, we directly supervised the features after the decoder by identity annotations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "type": "text", + "content": "Specifically, for ReID task, the extracted feature is a simple concatenation of all feature vectors " + }, + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_f = [y_f^1;\\dots ;y_f^{N^t}]" + }, + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "type": "inline_equation", + "content": "N^t = 6" + }, + { + "bbox": [ + 304, + 72, + 545, + 121 + ], + "type": "text", + "content": " by default. 
The loss function is a combination of ID loss [103] and triplet loss [52] written as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 353, + 137, + 545, + 152 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 137, + 545, + 152 + ], + "spans": [ + { + "bbox": [ + 353, + 137, + 545, + 152 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {r e i d}} = \\mathcal {L} _ {I D} (\\mathbf {Y} _ {f}) + \\mathcal {L} _ {\\text {t r i p l e t}} (\\mathbf {Y} _ {f}). \\tag {9}", + "image_path": "3774628867e9e8bd8b21ce594b75c48e776887ca1efaf6ed6f7d78f0fe63d49c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 171, + 545, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 171, + 545, + 254 + ], + "spans": [ + { + "bbox": [ + 304, + 171, + 545, + 254 + ], + "type": "text", + "content": "PAR. Pedestrian attribute recognition only predicts whether an attribute exists in the global image. Therefore, we only supervise the output unit of global probabilities " + }, + { + "bbox": [ + 304, + 171, + 545, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_p" + }, + { + "bbox": [ + 304, + 171, + 545, + 254 + ], + "type": "text", + "content": " from the task-guided interpreter. Specifically, following the common practice [40, 75], we adopt the weighted binary cross-entropy loss. 
Given the probability predictions " + }, + { + "bbox": [ + 304, + 171, + 545, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_p" + }, + { + "bbox": [ + 304, + 171, + 545, + 254 + ], + "type": "text", + "content": " associated with " + }, + { + "bbox": [ + 304, + 171, + 545, + 254 + ], + "type": "inline_equation", + "content": "N^t" + }, + { + "bbox": [ + 304, + 171, + 545, + 254 + ], + "type": "text", + "content": " attributes, we have:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 266, + 545, + 323 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 266, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 317, + 266, + 545, + 323 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {p a r} = \\sum_ {n = 1} ^ {N _ {t}} w _ {n} \\left(y _ {n} \\log \\left(y _ {p} ^ {n}\\right) + \\left(1 - y _ {n}\\right) \\log \\left(1 - y _ {p} ^ {n}\\right)\\right), \\\\ w _ {n} = y _ {n} e ^ {1 - \\gamma_ {n}} + (1 - y _ {n}) e ^ {\\gamma_ {n}}, \\tag {10} \\\\ \\end{array}", + "image_path": "b66cbb756fc82e475ea6c55a3b480fb13ff9bf712fc2fba809593f0c36515f3c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "type": "inline_equation", + "content": "y_{n}" + }, + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "type": "text", + "content": " denotes the annotation of " + }, + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "type": "text", + "content": "-th attribute and " + }, + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "type": "inline_equation", + "content": "\\gamma_{n}" + }, + { + "bbox": [ + 304, 
+ 324, + 545, + 348 + ], + "type": "text", + "content": " denotes the positive example ratio of " + }, + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 324, + 545, + 348 + ], + "type": "text", + "content": "-th attribute." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "text", + "content": "Human Parsing. Human parsing can be considered as semantic segmentation of human part. We view the presence of semantic classes as predictable attributes since the semantic classes are not always present in an image. Therefore, the global probability " + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_p" + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "text", + "content": " and local probability map " + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_m" + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "text", + "content": " are selected from the output units to describe whether a semantic part exists on the image level (global) and pixel level (local), respectively. Given a query " + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_l" + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "text", + "content": " defined in Eq. 
2 which corresponds to a semantic class in human parsing, we adopt the binary cross entropy loss as " + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{par}" + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "text", + "content": " in pedestrian attribute recognition to constrain the global probability " + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_p" + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "text", + "content": ", and a combination of binary cross-entropy loss and dice loss [8] to supervised local probability map " + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_m" + }, + { + "bbox": [ + 304, + 369, + 545, + 525 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 320, + 536, + 531, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 536, + 531, + 550 + ], + "spans": [ + { + "bbox": [ + 320, + 536, + 531, + 550 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s e g} = \\lambda_ {p a r} \\mathcal {L} _ {p a r} (\\mathbf {Y} _ {p}) + \\mathcal {L} _ {b c e} (\\mathbf {Y} _ {m}) + \\mathcal {L} _ {d i c e} (\\mathbf {Y} _ {m}),", + "image_path": "8adaefcf584e2322052ceb6abf0e03a7ba3f1247ae6f8f7564fab09b9dc6c790.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 560, + 509, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 560, + 509, + 574 + ], + "spans": [ + { + "bbox": [ + 306, + 560, + 509, + 574 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 560, + 509, + 574 + ], + "type": "inline_equation", + "content": "\\lambda_{par}" + }, + { + "bbox": [ + 306, + 560, + 509, + 574 + ], + "type": "text", + "content": " denotes the loss weight for " + }, + { + "bbox": [ + 306, + 
560, + 509, + 574 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{par}(\\mathbf{Y}_p)" + }, + { + "bbox": [ + 306, + 560, + 509, + 574 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 594, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 689 + ], + "type": "text", + "content": "Pose Estimation. We follow the common top-down setting for pose estimation, i.e., predicting keypoints based on the cropped human instances. We predict the heatmap w.r.t. the keypoints via mean-squared error. Similar to human parsing formulation, we also select the global probability " + }, + { + "bbox": [ + 304, + 594, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_p" + }, + { + "bbox": [ + 304, + 594, + 545, + 689 + ], + "type": "text", + "content": " and local probability map " + }, + { + "bbox": [ + 304, + 594, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_m" + }, + { + "bbox": [ + 304, + 594, + 545, + 689 + ], + "type": "text", + "content": " to predict whether a keypoint exists in the image level and pixel level, respectively. Mathematically, we have:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 345, + 700, + 545, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 700, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 345, + 700, + 545, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {p o s e}} = \\lambda_ {\\text {p a r}} \\mathcal {L} _ {\\text {p a r}} (\\mathbf {Y} _ {p}) + \\mathcal {L} _ {\\text {m s e}} (\\mathbf {Y} _ {m}). 
\\tag {11}", + "image_path": "68258c649a0924f09c6d4f72b4be9f15f3560f3f9c96e6a5c7ae2bec38c6845f.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17844" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": "Pedestrian Detection. Pedestrian Detection is a local prediction task but in a sparse manner. Following the widely adopted designs in end-to-end transformer-based detection [5, 102], ground-truth for " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "N^t" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": " query features in " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_l" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": " are determined by optimal bipartite matching between all " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "N^t" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": " predictions and GT boxes. 
Given output units " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_p" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_{bbox}" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": ", we adopt the identical cost formulation and loss as in [102]," + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 64, + 175, + 286, + 202 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 175, + 286, + 202 + ], + "spans": [ + { + "bbox": [ + 64, + 175, + 286, + 202 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\text {p e d d e t}} = \\lambda_ {\\text {c l s}} \\mathcal {L} _ {\\text {c l s}} (\\mathbf {Y} _ {p}) + \\lambda_ {i o u} \\mathcal {L} _ {i o u} (\\mathbf {Y} _ {b b o x}) + \\tag {12} \\\\ \\lambda_ {L 1} \\mathcal {L} _ {L 1} (\\mathbf {Y} _ {b b o x}). 
\\\\ \\end{array}", + "image_path": "2e9099919e63e880f7538f9f23e64281e3baebc3a8cfe636aeb1ff6c69c0ce66.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "spans": [ + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{cls},\\mathcal{L}_{iou}" + }, + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{L1}" + }, + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "text", + "content": " are focal loss [50], GIoU loss [67], and " + }, + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "inline_equation", + "content": "L1" + }, + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "text", + "content": " loss, respectively. Their corresponding loss weights " + }, + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 47, + 210, + 287, + 247 + ], + "type": "text", + "content": " are also identically set as in [102]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 257, + 128, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 257, + 128, + 270 + ], + "spans": [ + { + "bbox": [ + 47, + 257, + 128, + 270 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 276, + 178, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 276, + 178, + 289 + ], + "spans": [ + { + "bbox": [ + 47, + 276, + 178, + 289 + ], + "type": "text", + "content": "4.1. 
Implementation details" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 295, + 287, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 295, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 295, + 287, + 437 + ], + "type": "text", + "content": "Datasets. To enable general human-centric perceptions, we pretrain the proposed UniHCP at scale on a massive and diverse collection of human-centric datasets. Specifically, the training splits of 33 publicly available datasets are gathered to form the training set for UniHCP, including nine datasets for pose estimation and six datasets for ReID, Human Parsing, Attribute Prediction, Pedestrian Detection, separately. For ReID, there are two different subtasks: general ReID and cloth-changing ReID, where the difference is how cloth-changing is treated. We empirically found it is best to view them as different tasks with different task queries, hence, we opted for this setup." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 438, + 287, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 438, + 287, + 545 + ], + "spans": [ + { + "bbox": [ + 46, + 438, + 287, + 545 + ], + "type": "text", + "content": "We carefully follow the de-duplication practices as introduced in [95] to remove the samples that could appear in the evaluation datasets. We also remove images whose groundtruth labels are not given, leading to 2.3M distinct training samples in total. For evaluation, apart from the available validation or test splits of the 33 training sets, we also included several out-of-pretrain downstream datasets for each type of human-centric task. More details about dataset setups can be found in supplementary materials."
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 545, + 287, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 287, + 653 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 287, + 653 + ], + "type": "text", + "content": "Training. We use the standard ViT-B [14] as the encoder network and initialize it with the MAE pretrained [18] weights following [43, 88]. For the main results, we use a batch size of 4324 in total, with the dataset-specific batch size being proportional to the size of each dataset. Unless otherwise specified, the image resolution used in pretraining is " + }, + { + "bbox": [ + 46, + 545, + 287, + 653 + ], + "type": "inline_equation", + "content": "256 \\times 192" + }, + { + "bbox": [ + 46, + 545, + 287, + 653 + ], + "type": "text", + "content": " for pose estimation and attribute prediction, " + }, + { + "bbox": [ + 46, + 545, + 287, + 653 + ], + "type": "inline_equation", + "content": "256 \\times 128" + }, + { + "bbox": [ + 46, + 545, + 287, + 653 + ], + "type": "text", + "content": " for ReID, " + }, + { + "bbox": [ + 46, + 545, + 287, + 653 + ], + "type": "inline_equation", + "content": "480 \\times 480" + }, + { + "bbox": [ + 46, + 545, + 287, + 653 + ], + "type": "text", + "content": " for human parsing, and a maximum height/width of 1333 for pedestrian detection." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "type": "text", + "content": "For computational efficiency, each GPU only runs one specific task, and each task can be evenly distributed to multiple GPUs whereas a single GPU is not capable of handling its workloads. 
To further save the GPU memory during the training time, we adopt the gradient checkpointing [3] in the" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 310, + 91, + 541, + 272 + ], + "blocks": [ + { + "bbox": [ + 312, + 71, + 538, + 82 + ], + "lines": [ + { + "bbox": [ + 312, + 71, + 538, + 82 + ], + "spans": [ + { + "bbox": [ + 312, + 71, + 538, + 82 + ], + "type": "text", + "content": "Table 2. Representative datasets used in multitask co-training." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 91, + 541, + 272 + ], + "lines": [ + { + "bbox": [ + 310, + 91, + 541, + 272 + ], + "spans": [ + { + "bbox": [ + 310, + 91, + 541, + 272 + ], + "type": "table", + "html": "
Task TypeDatasetsNumber of samples
ReLU (6 datasets)CUHK03 [41] PRCC [89]268,002
...
Pose Estimation (9 datasets)COCO-Pose [51] AI Challenger [83]1,261,749
...
Human Parsing (6 datasets)LIP [16] DeepFashion2 [15]384,085
...
Attribute Prediction (6 datasets)PA-100K [56] RAPv2 [35]242,880
...
Pedestrian Detection (6 datasets)COCO-Person [51] CrowdHuman [69]170,687
...
", + "image_path": "37bd6f6790b3ea34ee530d074c88e120b9739debc50f637733639ecf187ea00b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 283, + 545, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 283, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 304, + 283, + 545, + 331 + ], + "type": "text", + "content": "encoder forward pass among all tasks and additionally use accumulative gradients for detection tasks. Due to the high GPU-memory demand of detection datasets, the batch size for the detection task is timed by 0.6." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 331, + 546, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 546, + 474 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 546, + 474 + ], + "type": "text", + "content": "We use Adafactor [70] optimizer and follow the recommended modifications [94] for adopting it to ViT, we set " + }, + { + "bbox": [ + 304, + 331, + 546, + 474 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 304, + 331, + 546, + 474 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 331, + 546, + 474 + ], + "type": "inline_equation", + "content": "\\beta_{2}" + }, + { + "bbox": [ + 304, + 331, + 546, + 474 + ], + "type": "text", + "content": " clipped at 0.999, disables the parameter scaling and decoupled weight decay to 0.05. We linearly warm up the learning rate for the first 1.5k iterations to 1e-3, after which the learning rate is decayed to 0 following a cosine decay scheduler. We also use a drop-path rate of 0.2 and layer-wise learning rate decay [43, 88] of 0.75 in the ViT-B encoder. The whole training process takes 105k iterations which are approximately 130 epochs for detection datasets and 200 epochs for other datasets. The whole training takes 120 hours in total on 88 NVIDIA V100 GPUs." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 482, + 391, + 494 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 482, + 391, + 494 + ], + "spans": [ + { + "bbox": [ + 306, + 482, + 391, + 494 + ], + "type": "text", + "content": "4.2. Main Results" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 500, + 545, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 500, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 304, + 500, + 545, + 632 + ], + "type": "text", + "content": "To demonstrate the capability of UniHCP as a unified model for human-centric perceptions, we first evaluate our UniHCP on thirteen datasets that appear in the pretraining stage (in Section 4.2.1), e.g., CIHP. Furthermore, we employ five datasets whose training splits are not included in the pretraining stage to evaluate the cross-datasets transferability of UniHCP (in Section 4.2.2). We also demonstrate that UniHCP has the potential to efficiently transfer to new datasets that do not appear in pretraining with only a few images (in Section 4.2.3). For detailed evaluation configuration, please refer to the supplementary." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 647, + 456, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 647, + 456, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 647, + 456, + 658 + ], + "type": "text", + "content": "4.2.1 In-pretrain Dataset Results" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "We conduct extensive evaluations on thirteen in-pretrain datasets to demonstrate the effectiveness of our UniHCP. 
Table 3-7 summarize the evaluation results of UniHCP on five representative human-centric tasks, i.e., person ReID," + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17845" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 113, + 232, + 190 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 233, + 102 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 233, + 102 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 233, + 102 + ], + "type": "text", + "content": "Table 3. Person ReID evaluation on Market1501, MSMT, CUHK03 with mAP. †indicates using additional camera IDs." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 113, + 232, + 190 + ], + "lines": [ + { + "bbox": [ + 53, + 113, + 232, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 113, + 232, + 190 + ], + "type": "table", + "html": "
MethodMarket1501MSMT17CUHK03
HOReID [79]84.9--
MNE [39]--77.7
SAN [26]88.055.776.4
TransReID [19]86.861.0-
TransReID† [19]88.967.4-
UniHCP (direct eval)80.755.268.6
UniHCP (finetune)90.367.383.1
", + "image_path": "60f34a906ab3797651b05ad5ea71643946fb5dfbb2496a7b42c5f648a090d090.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 238, + 70, + 378, + 102 + ], + "lines": [ + { + "bbox": [ + 238, + 70, + 378, + 102 + ], + "spans": [ + { + "bbox": [ + 238, + 70, + 378, + 102 + ], + "type": "text", + "content": "Table 4. Pedestrian attribute recognition evaluation on PA-100K and RAPv2 test sets with mA." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "type": "table", + "bbox": [ + 242, + 113, + 376, + 191 + ], + "blocks": [ + { + "bbox": [ + 242, + 113, + 376, + 191 + ], + "lines": [ + { + "bbox": [ + 242, + 113, + 376, + 191 + ], + "spans": [ + { + "bbox": [ + 242, + 113, + 376, + 191 + ], + "type": "table", + "html": "
MethodPA-100KRAPv2
SSC [23]81.87-
C-Tran [32]81.53-
Q2L [54]80.72-
L2L [40]82.37-
DAFL [24]83.5481.04
UniHCP (direct eval)79.3277.20
UniHCP (finetune)86.1882.34
", + "image_path": "62b5360011bc08039a536906d5f5dac853980174832e2ecb70e933b762e55c36.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 388, + 113, + 539, + 193 + ], + "blocks": [ + { + "bbox": [ + 386, + 70, + 542, + 102 + ], + "lines": [ + { + "bbox": [ + 386, + 70, + 542, + 102 + ], + "spans": [ + { + "bbox": [ + 386, + 70, + 542, + 102 + ], + "type": "text", + "content": "Table 5. Human parsing evaluation on Human3.6M, LIP and CIHP val sets with mIoU." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 388, + 113, + 539, + 193 + ], + "lines": [ + { + "bbox": [ + 388, + 113, + 539, + 193 + ], + "spans": [ + { + "bbox": [ + 388, + 113, + 539, + 193 + ], + "type": "table", + "html": "
MethodH3.6MLIPCIHP
HCMOCO [20]62.50--
SNT [22]-54.7360.87
PCNet [99]-57.0361.05
SCHP [38]-59.36-
CDGNet [53]-60.3065.56
UniHCP (direct eval)65.9063.8068.60
UniHCP (finetune)65.9563.8669.80
", + "image_path": "6578489c4b2e49e8ab222109377b3d336bba5eb37a3ca6c0f70686f8f46615e3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 50, + 257, + 238, + 350 + ], + "blocks": [ + { + "bbox": [ + 47, + 204, + 240, + 237 + ], + "lines": [ + { + "bbox": [ + 47, + 204, + 240, + 237 + ], + "spans": [ + { + "bbox": [ + 47, + 204, + 240, + 237 + ], + "type": "text", + "content": "Table 6. Pedestrian detection evaluation on CrowdHuman val set. Compared with the SOTA, UniHCP achieves comparable mAP and better JI." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 257, + 238, + 350 + ], + "lines": [ + { + "bbox": [ + 50, + 257, + 238, + 350 + ], + "spans": [ + { + "bbox": [ + 50, + 257, + 238, + 350 + ], + "type": "table", + "html": "
MethodmAPMR-2(↓)JI
DETR [5]75.973.274.4
PEDR [49]91.643.783.3
Deformable-DETR [105]91.557.083.1
Sparse-RCNN [73]91.344.881.3
Iter-Deformable-DETR [102]92.141.584.0
Iter-Sparse-RCNN [102]92.541.683.3
UniHCP (direct eval)90.046.682.2
UniHCP (finetune)92.541.685.8
", + "image_path": "4b11ab791a467ca8c68106ed029239f8ec315a82164fb8a265e45a71b84da9c0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 250, + 257, + 539, + 351 + ], + "blocks": [ + { + "bbox": [ + 246, + 204, + 539, + 248 + ], + "lines": [ + { + "bbox": [ + 246, + 204, + 539, + 248 + ], + "spans": [ + { + "bbox": [ + 246, + 204, + 539, + 248 + ], + "type": "text", + "content": "Table 7. Pose estimation evaluation on COCO, Human3.6M, AI Challenge and OCHuman. Following [88], we report the results on COCO val set, Human3.6M, AI Challenge val set, and OCHuman test set. †denotes the results reported by MMPose [10]. ‡denotes the results achieved using multi-dataset training." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 250, + 257, + 539, + 351 + ], + "lines": [ + { + "bbox": [ + 250, + 257, + 539, + 351 + ], + "spans": [ + { + "bbox": [ + 250, + 257, + 539, + 351 + ], + "type": "table", + "html": "
MethodCOCO/mAPH3.6M/EPE(↓)AIC/mAPOCHuman/mAP
HRNet-w32† [72]74.49.4--
HRNet-w48† [72]75.17.4--
TokenPose-L/D24 [44]75.9---
HRFormer-B [93]75.6---
ViTPose-B [88]75.8---
ViTPose-B‡ [88]77.1-32.087.3
UniHCP (direct eval)76.16.932.587.4
UniHCP (finetune)76.56.633.6N/A
", + "image_path": "f5e41cdf8683469dee6025c3cd9e15914d265fa37d103c4627a997af7022811b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 372, + 287, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 372, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 46, + 372, + 287, + 468 + ], + "type": "text", + "content": "pedestrian attribute recognition, human parsing, pedestrian detection, and pose estimation. We report two kinds of evaluation results of our UniHCP: (1) direct evaluation, where the pre-trained model with cross-task shared encoder-decoder weights and task-specific queries are directly used for evaluation on the target dataset, and (2) finetuning, where the pretrained UniHCP are first finetuned with the train split of the target dataset and then evaluated." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 471, + 287, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 471, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 46, + 471, + 287, + 567 + ], + "type": "text", + "content": "As observed, the direct evaluation results of UniHCP show promising performance on most human-centric tasks, especially on human parsing and pose estimation tasks, which show better or on-par performance with the State-Of-The-Art (SOTA). The exception is the person ReID task, which observes noticeable performance gaps with the SOTA. We suggest this is due to its huge disparity from other tasks and can be remedied with quick finetuning." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": "With finetuning, our UniHCP achieves new SOTAs on nine out of the total twelve datasets and on par performance on the rest three datasets, even without task-specific design in architecture or task-specific priors, showing that UniHCP extracts complementary knowledge among human-centric tasks. Concretely, Table 4 shows that in the human attribute recognition task, UniHCP significantly surpasses previous SOTA DAFL [24] by " + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "+3.79\\%" + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": " mA on PA-100K and " + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "+1.20\\%" + }, + { + "bbox": [ + 46, + 570, + 288, + 714 + ], + "type": "text", + "content": " mA on RAPv2 datasets, respectively, which indicates that UniHCP well extracts the shared attribute information among using the output unit of global probabilities in the interpreter. Second, UniHCP also pushes" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 308, + 412, + 545, + 479 + ], + "blocks": [ + { + "bbox": [ + 305, + 370, + 545, + 404 + ], + "lines": [ + { + "bbox": [ + 305, + 370, + 545, + 404 + ], + "spans": [ + { + "bbox": [ + 305, + 370, + 545, + 404 + ], + "type": "text", + "content": "Table 8. Transfer performance on ATR [47], SenseReID [100], Caltech [13], MPII [1] and PETA [12]. Results with " + }, + { + "bbox": [ + 305, + 370, + 545, + 404 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 305, + 370, + 545, + 404 + ], + "type": "text", + "content": " are achieved by using additional data. DE - direct evaluation. 
FT - finetuning." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 412, + 545, + 479 + ], + "lines": [ + { + "bbox": [ + 308, + 412, + 545, + 479 + ], + "spans": [ + { + "bbox": [ + 308, + 412, + 545, + 479 + ], + "type": "table", + "html": "
MethodsParsingReIDDetectionPoseAttribute
ATRSenseReLUCaltech(↓)MPIIPETA
SOTA97.39 [53]34.6 [100]46.6 [17]92.3 [90]87.07 [24]
SOTA†--28.8 [17]93.3 [88]-
UniHCP (DE)-46.037.8--
UniHCP (FT)97.74N/A27.293.288.78
", + "image_path": "fee5d3dc4c1e0493be6ad0858328f7c4ba253f75811341b3ab3eb4a8dd460b99.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": "the performance of another important human task, i.e., human parsing, to a new level. Specifically, " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "+3.45\\%" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " mIoU, " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "+3.56\\%" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " mIoU, and " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "+4.24\\%" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " mIoU performance gains are observed on Human3.6M, LIP, and CIHP datasets, respectively. We suggest the newly-added global supervision " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{par}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " will help UniHCP to suppress the false prediction on not appeared semantic parts. UniHCP also shows superior performance to previous methods on pose estimation. On person ReID, UniHCP outperforms TransReid [19] on Market1501 and MNE [39] on CUHK03 without the help of any additional camera information and training images during evaluation. 
For pedestrian detection, our UniHCP achieves " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "+1.8\\%" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " JI performance gain compared with Iter-Deformable-DETR [102] and on-par performance with the Iter-Sparse-RCNN [102] on mAP. These strong performances on diverse datasets across five tasks demonstrate the feasibility and powerfulness of the unified human-centric model and large-scale pretraining." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17846" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 123, + 287, + 194 + ], + "blocks": [ + { + "bbox": [ + 47, + 71, + 288, + 115 + ], + "lines": [ + { + "bbox": [ + 47, + 71, + 288, + 115 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 288, + 115 + ], + "type": "text", + "content": "Table 9. One-shot human parsing and human pose estimation transfer results under different tuning settings. Every method uses only 1 image per class to transfer. We repeat each experiment for 10 times and report the mean and standard deviation." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 123, + 287, + 194 + ], + "lines": [ + { + "bbox": [ + 49, + 123, + 287, + 194 + ], + "spans": [ + { + "bbox": [ + 49, + 123, + 287, + 194 + ], + "type": "table", + "html": "
MethodsLearnable \nparams ratioParsingPose
ATR/pACCMPII/mAP
One-shot finetuning100%90.49±1.2270.6±7.53
One-shot prompt tuning<1%93.65±0.7783.8±5.08
Full-data finetuning100%97.7493.2
", + "image_path": "21cdedbfccf3930c3fe8302bc6e920b8c6322947ed495d4cd21e045fd76f849a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 49, + 264, + 287, + 338 + ], + "blocks": [ + { + "bbox": [ + 47, + 200, + 287, + 257 + ], + "lines": [ + { + "bbox": [ + 47, + 200, + 287, + 257 + ], + "spans": [ + { + "bbox": [ + 47, + 200, + 287, + 257 + ], + "type": "text", + "content": "Table 10. Comparison of different parameter-sharing schemes. We report the average scores of direct evaluation results on in-pretrain human-centric datasets. \"by " + }, + { + "bbox": [ + 47, + 200, + 287, + 257 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_t" + }, + { + "bbox": [ + 47, + 200, + 287, + 257 + ], + "type": "text", + "content": "\" denotes sharing decoder and interpreter across task types " + }, + { + "bbox": [ + 47, + 200, + 287, + 257 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_t" + }, + { + "bbox": [ + 47, + 200, + 287, + 257 + ], + "type": "text", + "content": ". For more detailed results on each dataset, please refer to the supplementary." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 264, + 287, + 338 + ], + "lines": [ + { + "bbox": [ + 49, + 264, + 287, + 338 + ], + "spans": [ + { + "bbox": [ + 49, + 264, + 287, + 338 + ], + "type": "table", + "html": "
MethodsTotal params.Shared params.Shared moduleAvg.
EncoderDecoderTask-guided Interpreter
Baseline109.32M109.08M67.4
(a)156.17M105.60M67.4
(b)489.67M91.07M60.6
(c)170.83M109.08Mby ttby tt65.0
", + "image_path": "1a47afd25b39f7a8b04a6ee1eab6ffc1c1ef1080321f295264022916bbc969c3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 349, + 216, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 349, + 216, + 361 + ], + "spans": [ + { + "bbox": [ + 47, + 349, + 216, + 361 + ], + "type": "text", + "content": "4.2.2 Cross-datasets Transfer Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "spans": [ + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": "As the task-guided interpreter formulates all task requests into four output units, knowledge learned behind these units can be easily transferred to unseen datasets. We conduct evaluations on five datasets that do not appear during pretraining to evaluate the transferability of UniHCP. UniHCP is finetuned to adapt to new datasets except for SenseReID, on which the performance is tested by direct evaluation. As shown in Table 8, UniHCP outperforms existing SOTAs in 4 out of 5 datasets. 
Specifically, UniHCP achieves " + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "inline_equation", + "content": "+0.35\\%" + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": " pACC, " + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "inline_equation", + "content": "+11.4\\%" + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": " top-1, " + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "inline_equation", + "content": "-1.6\\%" + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": " heavy occluded " + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "inline_equation", + "content": "\\mathrm{MR}^{-2}(\\downarrow)" + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "inline_equation", + "content": "+0.1\\%" + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": " mAP, and " + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "inline_equation", + "content": "+1.71\\%" + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": " mA on ATR, SenseReID, Caltech, MPII, and PETA, respectively. On MPII, UniHCP achieves on-par performance with multi-datasets trained SOTA while improving single-dataset trained SOTA by " + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "inline_equation", + "content": "+0.9\\%" + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": " mAP. 
Notably, even without finetuning, UniHCP achieves a " + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "inline_equation", + "content": "-8.8\\%" + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": " heavy occluded " + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "inline_equation", + "content": "\\mathrm{MR}^{-2}(\\downarrow)" + }, + { + "bbox": [ + 46, + 369, + 288, + 596 + ], + "type": "text", + "content": " performance gain on single-dataset trained SOTA. Consistent improvements on transfer tasks provide strong support to the decent transferability of UniHCP." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 610, + 198, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 610, + 198, + 623 + ], + "spans": [ + { + "bbox": [ + 47, + 610, + 198, + 623 + ], + "type": "text", + "content": "4.2.3 Data-Efficient Transferring" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "content": "As UniHCP achieves SOTAs on full-data finetuning setting, we further evaluate its potential for transferring to new datasets with extremely scarce training images, e.g., only one image per class for training. As summarized in Table 9, by conducting prompt tuning with one image per class, UniHCL achieves " + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "inline_equation", + "content": "93.65\\%" + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "content": " pACC on ATR for parsing and " + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "inline_equation", + "content": "83.8\\%" + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "content": " mAP on MPII for pose estimation, respectively." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": "For prompt tuning on ATR, we follow [55]. For prompt tuning on MPII, we only update queries and their associate position embeddings. The prompt tuning results are close to that of the full-data finetuning setting and suppress the results of finetuning the whole model with one image per class for a large margin. Moreover, UniHCP with prompt tuning shows much lower standard deviations than one-shot finetuning on human parsing and pose estimation tasks, verifying that UniHCP learns generic human-centric representation which is beneficial for data-efficient transferring with low computation cost." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 210, + 491, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 210, + 491, + 224 + ], + "spans": [ + { + "bbox": [ + 305, + 210, + 491, + 224 + ], + "type": "text", + "content": "4.3. Ablation Study on Weight Sharing" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 228, + 545, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 228, + 545, + 445 + ], + "spans": [ + { + "bbox": [ + 304, + 228, + 545, + 445 + ], + "type": "text", + "content": "As UniHCP achieves desirable performance on various human-centric tasks while sharing most parameters among different tasks, one problem remains whether more task-specific parameters benefit learning. To answer the question, we ablate three weight sharing variants of UniHCP during pretraining using a 60k-iteration training schedule with 1k batch size. 
Results in Table 10(b) show that compared to the original UniHCP i.e., the Baseline), unifying task-guided interpreters among all tasks resulted in an average performance on par with using specific heads while reducing about " + }, + { + "bbox": [ + 304, + 228, + 545, + 445 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 304, + 228, + 545, + 445 + ], + "type": "text", + "content": " of the parameters. We also note that using task-specific or task-type-specific decoders and interpreters leads to an obvious " + }, + { + "bbox": [ + 304, + 228, + 545, + 445 + ], + "type": "inline_equation", + "content": "(-6.8\\%)" + }, + { + "bbox": [ + 304, + 228, + 545, + 445 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 228, + 545, + 445 + ], + "type": "inline_equation", + "content": "-2.4\\%" + }, + { + "bbox": [ + 304, + 228, + 545, + 445 + ], + "type": "text", + "content": " respectively) performance drop on average when compared to the original UniHCP (see results in Table 10(b) and (c)). We speculate that in these ablation settings, complementary humancentric knowledge can not be properly shared among tasks, which leads to performance drops on most tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 454, + 384, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 454, + 384, + 467 + ], + "spans": [ + { + "bbox": [ + 306, + 454, + 384, + 467 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 474, + 545, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 545, + 642 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 545, + 642 + ], + "type": "text", + "content": "In this work, we present a Unified Model for Human-Centric Perceptions (UniHCP). Based on a simple query-based task formulation, UniHCP can easily handle multiple distinctly defined human-centric tasks simultaneously. 
Extensive experiments on diverse datasets demonstrate that UniHCP pretrained on a massive collection of human-centric datasets delivers a competitive performance compared with task-specific models. When adapted to specific tasks, UniHCP obtains a series of SOTA performances over a wide spectrum of human-centric benchmarks. Further analysis also demonstrate the capability of UniHCP on parameter and data-efficient transfer and the benefit of weight sharing designs. We hope our work can motivate more future works on developing general human-centric models." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "text", + "content": "Acknowledgement. This paper was supported by the Australian Research Council Grant DP200103223, Australian Medical Research Future Fund MRFAI000085, CRC-P Smart Material Recovery Facility (SMRF) - Curby Soft Plastics, and CRC-P ARIA - Bionic Visual-Spatial Prosthesis for the Blind." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17847" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 58, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 58, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2d human pose estimation: New benchmark and state of the art analysis. In Proceedings of the IEEE Conference on computer Vision and Pattern Recognition, pages 3686-3693, 2014. 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 147, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 147, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 58, + 147, + 288, + 201 + ], + "type": "text", + "content": "[2] Vamsi Aribandi, Yi Tay, Tal Schuster, Jinfeng Rao, Huaixiu Steven Zheng, Sanket Vaibhav Mehta, Honglei Zhuang, Vinh Q Tran, Dara Bahri, Jianmo Ni, et al. Ext5: Towards extreme multi-task scaling for transfer learning. arXiv preprint arXiv:2111.10952, 2021. 
2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 202, + 288, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 202, + 288, + 245 + ], + "spans": [ + { + "bbox": [ + 57, + 202, + 288, + 245 + ], + "type": "text", + "content": "[3] FairScale authors. Fairscale: A general purpose modular pytorch library for high performance and large scale training. https://github.com/facebookresearch/fairscale, 2021.6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 247, + 288, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 247, + 288, + 290 + ], + "spans": [ + { + "bbox": [ + 57, + 247, + 288, + 290 + ], + "type": "text", + "content": "[4] Jiale Cao, Yanwei Pang, Jin Xie, Fahad Shahbaz Khan, and Ling Shao. From handcrafted to deep features for pedestrian detection: a survey. IEEE transactions on pattern analysis and machine intelligence, 2021. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 57, + 291, + 288, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 291, + 288, + 345 + ], + "spans": [ + { + "bbox": [ + 57, + 291, + 288, + 345 + ], + "type": "text", + "content": "[5] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 57, + 346, + 288, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 346, + 288, + 388 + ], + "spans": [ + { + "bbox": [ + 57, + 346, + 288, + 388 + ], + "type": "text", + "content": "[6] Ting Chen, Saurabh Saxena, Lala Li, Tsung-Yi Lin, David J Fleet, and Geoffrey Hinton. A unified sequence interface for vision tasks. arXiv preprint arXiv:2206.07669, 2022. 
2, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 391, + 288, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 391, + 288, + 446 + ], + "spans": [ + { + "bbox": [ + 57, + 391, + 288, + 446 + ], + "type": "text", + "content": "[7] Yinpeng Chen, Xiyang Dai, Mengchen Liu, Dongdong Chen, Lu Yuan, and Zicheng Liu. Dynamic convolution: Attention over convolution kernels. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11030-11039, 2020. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 57, + 447, + 288, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 447, + 288, + 501 + ], + "spans": [ + { + "bbox": [ + 57, + 447, + 288, + 501 + ], + "type": "text", + "content": "[8] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1290–1299, 2022. 4, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 57, + 502, + 288, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 502, + 288, + 555 + ], + "spans": [ + { + "bbox": [ + 57, + 502, + 288, + 555 + ], + "type": "text", + "content": "[9] Xiao Chu, Wei Yang, Wanli Ouyang, Cheng Ma, Alan L Yuille, and Xiaogang Wang. Multi-context attention for human pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1831-1840, 2017. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 557, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 557, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 53, + 557, + 287, + 590 + ], + "type": "text", + "content": "[10] MMPose Contributors. Openmmlab pose estimation toolbox and benchmark. 
https://github.com/open-mmlab/mmpose, 2020.7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 591, + 288, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 591, + 288, + 645 + ], + "spans": [ + { + "bbox": [ + 53, + 591, + 288, + 645 + ], + "type": "text", + "content": "[11] Emily E Cust, Alice J Sweeting, Kevin Ball, and Sam Robertson. Machine and deep learning for sport-specific movement recognition: A systematic review of model development and performance. Journal of sports sciences, 37(5):568-600, 2019. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 647, + 288, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 647, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 53, + 647, + 288, + 690 + ], + "type": "text", + "content": "[12] Yubin Deng, Ping Luo, Chen Change Loy, and Xiaou Tang. Pedestrian attribute recognition at far distance. In Proceedings of the 22nd ACM international conference on Multimedia, pages 789-792, 2014. 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 691, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 691, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 53, + 691, + 288, + 712 + ], + "type": "text", + "content": "[13] Piotr Dollar, Christian Wojek, Bernt Schiele, and Pietro Perona. Pedestrian detection: An evaluation of the state of" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "type": "text", + "content": "the art. IEEE transactions on pattern analysis and machine intelligence, 34(4):743-761, 2011. 
7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 96, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 96, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 312, + 96, + 547, + 162 + ], + "type": "text", + "content": "[14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 3, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 163, + 547, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 163, + 547, + 228 + ], + "spans": [ + { + "bbox": [ + 312, + 163, + 547, + 228 + ], + "type": "text", + "content": "[15] Yuying Ge, Ruimao Zhang, Xiaogang Wang, Xiaou Tang, and Ping Luo. Deepfashion2: A versatile benchmark for detection, pose estimation, segmentation and re-identification of clothing images. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5337–5345, 2019. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 230, + 547, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 230, + 547, + 285 + ], + "spans": [ + { + "bbox": [ + 312, + 230, + 547, + 285 + ], + "type": "text", + "content": "[16] Ke Gong, Xiaodan Liang, Dongyu Zhang, Xiaohui Shen, and Liang Lin. Look into person: Self-supervised structure-sensitive learning and a new benchmark for human parsing. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 932-940, 2017. 
6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 286, + 547, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 286, + 547, + 342 + ], + "spans": [ + { + "bbox": [ + 312, + 286, + 547, + 342 + ], + "type": "text", + "content": "[17] Irtiza Hasan, Shengcai Liao, Jinpeng Li, Saad Ullah Akram, and Ling Shao. Generalizable pedestrian detection: The elephant in the room. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11328-11337, 2021. 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 342, + 547, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 342, + 547, + 396 + ], + "spans": [ + { + "bbox": [ + 312, + 342, + 547, + 396 + ], + "type": "text", + "content": "[18] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dólár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16000-16009, 2022. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 398, + 547, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 398, + 547, + 453 + ], + "spans": [ + { + "bbox": [ + 312, + 398, + 547, + 453 + ], + "type": "text", + "content": "[19] Shuting He, Hao Luo, Pichao Wang, Fan Wang, Hao Li, and Wei Jiang. Transreid: Transformer-based object re-identification. In Proceedings of the IEEE/CVF international conference on computer vision, pages 15013–15022, 2021. 2, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 455, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 455, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 312, + 455, + 545, + 487 + ], + "type": "text", + "content": "[20] Fangzhou Hong, Liang Pan, Zhongang Cai, and Ziwei Liu. Versatile multi-modal pre-training for human-centric perception. 
arXiv preprint arXiv:2203.13815, 2022. 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 488, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 488, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 312, + 488, + 545, + 533 + ], + "type": "text", + "content": "[21] Ronghang Hu and Amanpreet Singh. Unit: Multimodal multitask learning with a unified transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1439-1449, 2021. 2, 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 534, + 545, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 534, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 312, + 534, + 545, + 586 + ], + "type": "text", + "content": "[22] Ruyi Ji, Dawei Du, Libo Zhang, Longyin Wen, Yanjun Wu, Chen Zhao, Feiyue Huang, and Siwei Lyu. Learning semantic neural tree for human parsing. In European Conference on Computer Vision, pages 205-221. Springer, 2020." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 590, + 547, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 590, + 547, + 634 + ], + "spans": [ + { + "bbox": [ + 312, + 590, + 547, + 634 + ], + "type": "text", + "content": "[23] Jian Jia, Xiaotang Chen, and Kaiqi Huang. Spatial and semantic consistency regularizations for pedestrian attribute recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pages 962-971, 2021. 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 635, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 635, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 312, + 635, + 545, + 669 + ], + "type": "text", + "content": "[24] Jian Jia, Naiyu Gao, Fei He, Xiaotang Chen, and Kaiqi Huang. Learning disentangled attribute representations for robust pedestrian attribute recognition. 2022. 
7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 312, + 670, + 545, + 713 + ], + "type": "text", + "content": "[25] Sheng Jin, Lumin Xu, Jin Xu, Can Wang, Wentao Liu, Chen Qian, Wanli Ouyang, and Ping Luo. Whole-body human pose estimation in the wild. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, Au" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17848" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 72, + 72, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 72, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 72, + 72, + 286, + 95 + ], + "type": "text", + "content": "gust 23-28, 2020, Proceedings, Part IX 16, pages 196-214. Springer, 2020. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 96, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 96, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 53, + 96, + 287, + 150 + ], + "type": "text", + "content": "[26] Xin Jin, Cuiling Lan, Wenjun Zeng, Guoqiang Wei, and Zhibo Chen. Semantics-aligned representation learning for person re-identification. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 11173–11180, 2020. 
7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 152, + 288, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 152, + 288, + 207 + ], + "spans": [ + { + "bbox": [ + 53, + 152, + 288, + 207 + ], + "type": "text", + "content": "[27] Yannis Kalantidis, Lyndon Kennedy, and Li-Jia Li. Getting the look: clothing recognition and segmentation for automatic product suggestions in everyday photos. In Proceedings of the 3rd ACM conference on International Conference on multimedia retrieval, pages 105-112, 2013. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 208, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 208, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 53, + 208, + 288, + 262 + ], + "type": "text", + "content": "[28] Mahdi M Kalayeh, Emrah Basaran, Muhittin Gokmen, Mustafa E Kamasak, and Mubarak Shah. Human semantic parsing for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1062–1071, 2018. 1, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 264, + 288, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 264, + 288, + 308 + ], + "spans": [ + { + "bbox": [ + 53, + 264, + 288, + 308 + ], + "type": "text", + "content": "[29] Sameh Khamis, Cheng-Hao Kuo, Vivek K Singh, Vinay D Shet, and Larry S Davis. Joint learning for attribute-consistent person re-identification. In European conference on computer vision, pages 134–146. Springer, 2014. 1, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 309, + 288, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 309, + 288, + 363 + ], + "spans": [ + { + "bbox": [ + 53, + 309, + 288, + 363 + ], + "type": "text", + "content": "[30] Iasonas Kokkinos. 
Übernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6129-6138, 2017. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 365, + 288, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 365, + 288, + 408 + ], + "spans": [ + { + "bbox": [ + 53, + 365, + 288, + 408 + ], + "type": "text", + "content": "[31] Alexander Kolesnikov, André Susano Pinto, Lucas Beyer, Xiaohua Zhai, Jeremiah Harmsen, and Neil Houlsby. Uvim: A unified modeling approach for vision with learned guiding codes. arXiv preprint arXiv:2205.10337, 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 410, + 288, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 410, + 288, + 442 + ], + "spans": [ + { + "bbox": [ + 53, + 410, + 288, + 442 + ], + "type": "text", + "content": "[32] Jack Lanchantin, Tianlu Wang, Vicente Ordonez, and Yanjun Qi. General multi-label image classification with transformers. arXiv preprint arXiv:2011.14027, 2020. 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 444, + 288, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 444, + 288, + 476 + ], + "spans": [ + { + "bbox": [ + 53, + 444, + 288, + 476 + ], + "type": "text", + "content": "[33] Hei Law and Jia Deng. Cornernet: Detecting objects as paired keypoints. In Proceedings of the European conference on computer vision (ECCV), pages 734-750, 2018. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 478, + 288, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 478, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 53, + 478, + 288, + 510 + ], + "type": "text", + "content": "[34] Hei Law, Yun Teng, Olga Russakovsky, and Jia Deng. 
Cornernet-lite: Efficient keypoint based object detection. arXiv preprint arXiv:1904.08900, 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 511, + 288, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 511, + 288, + 555 + ], + "spans": [ + { + "bbox": [ + 53, + 511, + 288, + 555 + ], + "type": "text", + "content": "[35] Dangwei Li, Zhang Zhang, Xiaotang Chen, and Kaiqi Huang. A richly annotated pedestrian dataset for person retrieval in real surveillance scenarios. IEEE transactions on image processing, 28(4):1575-1590, 2019. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 557, + 288, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 557, + 288, + 600 + ], + "spans": [ + { + "bbox": [ + 53, + 557, + 288, + 600 + ], + "type": "text", + "content": "[36] Feng Li, Hao Zhang, Shilong Liu, Lei Zhang, Lionel M Ni, Heung-Yeung Shum, et al. Mask dino: Towards a unified transformer-based framework for object detection and segmentation. arXiv preprint arXiv:2206.02777, 2022. 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 601, + 288, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 601, + 288, + 656 + ], + "spans": [ + { + "bbox": [ + 53, + 601, + 288, + 656 + ], + "type": "text", + "content": "[37] Jiefeng Li, Siyuan Bian, Ailing Zeng, Can Wang, Bo Pang, Wentao Liu, and Cewu Lu. Human pose regression with residual log-likelihood estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11025–11034, 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 658, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 658, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 53, + 658, + 288, + 689 + ], + "type": "text", + "content": "[38] Peike Li, Yunqiu Xu, Yunchao Wei, and Yi Yang. Self-correction for human parsing. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 691, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 691, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 53, + 691, + 288, + 714 + ], + "type": "text", + "content": "[39] Suichan Li, Dapeng Chen, Bin Liu, Nenghai Yu, and Rui Zhao. Memory-based neighbourhood embedding for visual" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 331, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 331, + 73, + 545, + 105 + ], + "type": "text", + "content": "recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6102-6111, 2019. 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 107, + 545, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 107, + 545, + 152 + ], + "spans": [ + { + "bbox": [ + 312, + 107, + 545, + 152 + ], + "type": "text", + "content": "[40] Wanhua Li, Zhexuan Cao, Jianjiang Feng, Jie Zhou, and Jiwen Lu. Label2label: A language modeling framework for multi-attribute learning. In European Conference on Computer Vision, pages 562–579. Springer, 2022. 2, 5, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 153, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 153, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 312, + 153, + 545, + 184 + ], + "type": "text", + "content": "[41] Wei Li, Rui Zhao, Tong Xiao, and Xiaogang Wang. Deepreid: Deep filter pairing neural network for person re-identification. In CVPR, 2014. 
6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 186, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 186, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 312, + 186, + 545, + 262 + ], + "type": "text", + "content": "[42] Xulin Li, Yan Lu, Bin Liu, Yating Liu, Guojun Yin, Qi Chu, Jinyang Huang, Feng Zhu, Rui Zhao, and Nenghai Yu. Counterfactual intervention feature transfer for visible-infrared person re-identification. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23–27, 2022, Proceedings, Part XXVI, pages 381–398. Springer, 2022. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 264, + 545, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 264, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 312, + 264, + 545, + 297 + ], + "type": "text", + "content": "[43] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. Exploring plain vision transformer backbones for object detection. arXiv preprint arXiv:2203.16527, 2022. 2, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 298, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 298, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 312, + 298, + 545, + 353 + ], + "type": "text", + "content": "[44] Yanjie Li, Shoukui Zhang, Zhicheng Wang, Sen Yang, Wankou Yang, Shu-Tao Xia, and Erjin Zhou. Tokenpose: Learning keypoint tokens for human pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11313-11322, 2021. 2, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 354, + 545, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 354, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 312, + 354, + 545, + 387 + ], + "type": "text", + "content": "[45] Jianming Liang, Guanglu Song, Biao Leng, and Yu Liu. 
Unifying visual perception by dispersible points learning. arXiv preprint arXiv:2208.08630, 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 388, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 388, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 312, + 388, + 545, + 442 + ], + "type": "text", + "content": "[46] Xiaodan Liang, Ke Gong, Xiaohui Shen, and Liang Lin. Look into person: Joint body parsing & pose estimation network and a new benchmark. IEEE transactions on pattern analysis and machine intelligence, 41(4):871-885, 2018. 1, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 445, + 545, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 445, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 312, + 445, + 545, + 499 + ], + "type": "text", + "content": "[47] Xiaodan Liang, Chunyan Xu, Xiaohui Shen, Jianchao Yang, Si Liu, Jinhui Tang, Liang Lin, and Shuicheng Yan. Human parsing with contextualized convolutional neural network. In Proceedings of the IEEE international conference on computer vision, pages 1386-1394, 2015. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 501, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 501, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 312, + 501, + 545, + 544 + ], + "type": "text", + "content": "[48] Chunze Lin, Jiwen Lu, and Jie Zhou. Multi-grained deep feature learning for pedestrian detection. In 2018 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6. IEEE, 2018. 
1, 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 545, + 545, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 545, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 312, + 545, + 545, + 588 + ], + "type": "text", + "content": "[49] Matthieu Lin, Chuming Li, Xingyuan Bu, Ming Sun, Chen Lin, Junjie Yan, Wanli Ouyang, and Zhidong Deng. Detr for crowd pedestrian detection. arXiv preprint arXiv:2012.06785, 2020. 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 590, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 590, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 312, + 590, + 545, + 634 + ], + "type": "text", + "content": "[50] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dólar. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017. 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 312, + 635, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 312, + 635, + 545, + 689 + ], + "type": "text", + "content": "[51] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 312, + 691, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 691, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 312, + 691, + 545, + 714 + ], + "type": "text", + "content": "[52] Hao Liu, Jiashi Feng, Meibin Qi, Jianguo Jiang, and Shuicheng Yan. 
End-to-end comparative attention networks" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17849" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 73, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 72, + 73, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 73, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 72, + 73, + 287, + 95 + ], + "type": "text", + "content": "for person re-identification. IEEE Transactions on Image Processing, 26(7):3492-3506, 2017. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 96, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 96, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 53, + 96, + 287, + 150 + ], + "type": "text", + "content": "[53] Kunliang Liu, Ouk Choi, Jianming Wang, and Wonjun Hwang. Cdgnet: Class distribution guided network for human parsing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4473-4482, 2022. 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 152, + 287, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 152, + 287, + 185 + ], + "spans": [ + { + "bbox": [ + 53, + 152, + 287, + 185 + ], + "type": "text", + "content": "[54] Shilong Liu, Lei Zhang, Xiao Yang, Hang Su, and Jun Zhu. Query2label: A simple transformer way to multi-label classification. arXiv preprint arXiv:2107.10834, 2021. 
2, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 186, + 288, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 186, + 288, + 229 + ], + "spans": [ + { + "bbox": [ + 53, + 186, + 288, + 229 + ], + "type": "text", + "content": "[55] Xiao Liu, Kaixuan Ji, Yicheng Fu, Zhengxiao Du, Zhilin Yang, and Jie Tang. P-tuning v2: Prompt tuning can be comparable to fine-tuning universally across scales and tasks. arXiv preprint arXiv:2110.07602, 2021. 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 232, + 288, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 232, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 53, + 232, + 288, + 285 + ], + "type": "text", + "content": "[56] Xihui Liu, Haiyu Zhao, Maoqing Tian, Lu Sheng, Jing Shao, Shuai Yi, Junjie Yan, and Xiaogang Wang. Hydraplus-net: Attentive deep features for pedestrian analysis. In Proceedings of the IEEE international conference on computer vision, pages 350-359, 2017. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 287, + 288, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 287, + 288, + 331 + ], + "spans": [ + { + "bbox": [ + 53, + 287, + 288, + 331 + ], + "type": "text", + "content": "[57] Jiasen Lu, Christopher Clark, Rowan Zellers, Roozbeh Mottaghi, and Aniruddha Kembhavi. Unified-io: A unified model for vision, language, and multi-modal tasks. arXiv preprint arXiv:2206.08916, 2022. 2, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 332, + 288, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 332, + 288, + 387 + ], + "spans": [ + { + "bbox": [ + 53, + 332, + 288, + 387 + ], + "type": "text", + "content": "[58] Jiasen Lu, Vedanuj Goswami, Marcus Rohrbach, Devi Parikh, and Stefan Lee. 12-in-1: Multi-task vision and language representation learning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10437-10446, 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 388, + 288, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 388, + 288, + 432 + ], + "spans": [ + { + "bbox": [ + 53, + 388, + 288, + 432 + ], + "type": "text", + "content": "[59] Weian Mao, Yongtao Ge, Chunhua Shen, Zhi Tian, Xinlong Wang, Zhibin Wang, and Anton van den Hengel. Poseur: Direct human pose regression with transformers. arXiv preprint arXiv:2201.07412, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 434, + 288, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 434, + 288, + 487 + ], + "spans": [ + { + "bbox": [ + 53, + 434, + 288, + 487 + ], + "type": "text", + "content": "[60] Tewodros Legesse Munea, Yalew Zelalem Jembre, Halefom Tekle Weldegebriel, Longbiao Chen, Chenxi Huang, and Chenhui Yang. The progress of human pose estimation: a survey and taxonomy of models applied in 2d human pose estimation. IEEE Access, 8:133330-133348, 2020. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 489, + 288, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 489, + 288, + 533 + ], + "spans": [ + { + "bbox": [ + 53, + 489, + 288, + 533 + ], + "type": "text", + "content": "[61] Xuecheng Nie, Jiashi Feng, and Shuicheng Yan. Mutual learning to adapt for joint human parsing and pose estimation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 502-517, 2018. 1, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 535, + 288, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 535, + 288, + 578 + ], + "spans": [ + { + "bbox": [ + 53, + 535, + 288, + 578 + ], + "type": "text", + "content": "[62] Wanli Ouyang, Xingyu Zeng, and Xiaogang Wang. 
Learning mutual visibility relationship for pedestrian detection with a deep model. International Journal of Computer Vision, 120:14-27, 2016. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 579, + 288, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 579, + 288, + 633 + ], + "spans": [ + { + "bbox": [ + 53, + 579, + 288, + 633 + ], + "type": "text", + "content": "[63] Wanli Ouyang, Hui Zhou, Hongsheng Li, Quanquan Li, Junjie Yan, and Xiaogang Wang. Jointly learning deep features, deformable parts, occlusion and classification for pedestrian detection. IEEE transactions on pattern analysis and machine intelligence, 40(8):1874-1887, 2017. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 635, + 288, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 635, + 288, + 678 + ], + "spans": [ + { + "bbox": [ + 53, + 635, + 288, + 678 + ], + "type": "text", + "content": "[64] Seyoung Park, Bruce Xiaohan Nie, and Song-Chun Zhu. Attribute and-or grammar for joint parsing of human pose, parts and attributes. IEEE transactions on pattern analysis and machine intelligence, 40(7):1555-1569, 2017. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 681, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 681, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 681, + 288, + 713 + ], + "type": "text", + "content": "[65] Sida Peng, Wen Jiang, Huaijin Pi, Xiuli Li, Hujun Bao, and Xiaowei Zhou. Deep snake for real-time instance segmentation. 
In Proceedings of the IEEE/CVF Conference" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "type": "text", + "content": "on Computer Vision and Pattern Recognition, pages 8533-8542, 2020. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 96, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 96, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 312, + 96, + 545, + 150 + ], + "type": "text", + "content": "[66] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J Liu, et al. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140):1-67, 2020. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 152, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 152, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 312, + 152, + 545, + 217 + ], + "type": "text", + "content": "[67] Hamid Rezatofighi, Nathan Tsoi, JunYoung Gwak, Amir Sadeghian, Ian Reid, and Silvio Savarese. Generalized intersection over union: A metric and a loss for bounding box regression. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 658-666, 2019. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 220, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 220, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 312, + 220, + 545, + 274 + ], + "type": "text", + "content": "[68] Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus, and Yann LeCun. 
Overfeat: Integrated recognition, localization and detection using convolutional networks. arXiv preprint arXiv:1312.6229, 2013. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 276, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 276, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 312, + 276, + 545, + 319 + ], + "type": "text", + "content": "[69] Shuai Shao, Zijian Zhao, Boxun Li, Tete Xiao, Gang Yu, Xiangyu Zhang, and Jian Sun. Crowdhuman: A benchmark for detecting human in a crowd. arXiv preprint arXiv:1805.00123, 2018. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 321, + 545, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 321, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 312, + 321, + 545, + 364 + ], + "type": "text", + "content": "[70] Noam Shazeer and Mitchell Stern. Adafactor: Adaptive learning rates with sublinear memory cost. In International Conference on Machine Learning, pages 4596-4604. PMLR, 2018. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 366, + 545, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 366, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 312, + 366, + 545, + 421 + ], + "type": "text", + "content": "[71] Chi Su, Fan Yang, Shiliang Zhang, Qi Tian, Larry Steven Davis, and Wen Gao. Multi-task learning with low rank attribute embedding for multi-camera person re-identification. IEEE transactions on pattern analysis and machine intelligence, 40(5):1167-1181, 2017. 1, 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 422, + 545, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 422, + 545, + 476 + ], + "spans": [ + { + "bbox": [ + 312, + 422, + 545, + 476 + ], + "type": "text", + "content": "[72] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5693-5703, 2019. 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 479, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 479, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 312, + 479, + 545, + 544 + ], + "type": "text", + "content": "[73] Peize Sun, Rufeng Zhang, Yi Jiang, Tao Kong, Chenfeng Xu, Wei Zhan, Masayoshi Tomizuka, Lei Li, Zehuan Yuan, Changhu Wang, et al. Sparse r-cnn: End-to-end object detection with learnable proposals. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 14454-14463, 2021. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 545, + 545, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 545, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 312, + 545, + 545, + 601 + ], + "type": "text", + "content": "[74] Yifan Sun, Changmao Cheng, Yuhan Zhang, Chi Zhang, Liang Zheng, Zhongdao Wang, and Yichen Wei. Circle loss: A unified perspective of pair similarity optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6398-6407, 2020. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 602, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 602, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 312, + 602, + 545, + 657 + ], + "type": "text", + "content": "[75] Chufeng Tang, Lu Sheng, Zhaoxiang Zhang, and Xiaolin Hu. Improving pedestrian attribute recognition with weakly-supervised multi-scale attribute-specific localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4997-5006, 2019. 
5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 312, + 658, + 545, + 712 + ], + "type": "text", + "content": "[76] Yi Tang, Baopu Li, Min Liu, Boyu Chen, Yaonan Wang, and Wanli Ouyang. Autopedestrian: An automatic data augmentation and loss function search scheme for pedestrian detection. IEEE transactions on image processing, 30:8483-8496, 2021. 1" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17850" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 73, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 73, + 287, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 73, + 287, + 126 + ], + "spans": [ + { + "bbox": [ + 53, + 73, + 287, + 126 + ], + "type": "text", + "content": "[77] Yonglong Tian, Ping Luo, Xiaogang Wang, and Xiaou Tang. Pedestrian detection aided by deep learning semantic tasks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5079-5087, 2015. 1, 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 129, + 287, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 129, + 287, + 172 + ], + "spans": [ + { + "bbox": [ + 53, + 129, + 287, + 172 + ], + "type": "text", + "content": "[78] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. 
Advances in neural information processing systems, 30, 2017. 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 173, + 288, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 173, + 288, + 238 + ], + "spans": [ + { + "bbox": [ + 53, + 173, + 288, + 238 + ], + "type": "text", + "content": "[79] Guan'an Wang, Shuo Yang, Huanyu Liu, Zhicheng Wang, Yang Yang, Shuliang Wang, Gang Yu, Erjin Zhou, and Jian Sun. High-order information matters: Learning relation and topology for occluded person re-identification. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6449–6458, 2020. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 239, + 288, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 288, + 304 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 288, + 304 + ], + "type": "text", + "content": "[80] Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai, Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In International Conference on Machine Learning, pages 23318-23340. PMLR, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 304, + 288, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 304, + 288, + 359 + ], + "spans": [ + { + "bbox": [ + 53, + 304, + 288, + 359 + ], + "type": "text", + "content": "[81] Wenguan Wang, Hailong Zhu, Jifeng Dai, Yanwei Pang, Jianbing Shen, and Ling Shao. Hierarchical human parsing with typed part-relation reasoning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8929-8939, 2020. 
1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 360, + 287, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 360, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 53, + 360, + 287, + 403 + ], + "type": "text", + "content": "[82] Yingming Wang, Xiangyu Zhang, Tong Yang, and Jian Sun. Anchor detr: Query design for transformer-based detector. In Proceedings of the AAAI conference on artificial intelligence, volume 36, pages 2567-2575, 2022. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 404, + 287, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 404, + 287, + 468 + ], + "spans": [ + { + "bbox": [ + 53, + 404, + 287, + 468 + ], + "type": "text", + "content": "[83] Jiahong Wu, He Zheng, Bo Zhao, Yixin Li, Baoming Yan, Rui Liang, Wenjia Wang, Shipei Zhou, Guosen Lin, Yanwei Fu, et al. Large-scale datasets for going deeper in image understanding. In 2019 IEEE International Conference on Multimedia and Expo (ICME), pages 1480-1485. IEEE, 2019. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 470, + 287, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 470, + 287, + 514 + ], + "spans": [ + { + "bbox": [ + 53, + 470, + 287, + 514 + ], + "type": "text", + "content": "[84] Bin Xiao, Haiping Wu, and Yichen Wei. Simple baselines for human pose estimation and tracking. In Proceedings of the European conference on computer vision (ECCV), pages 466-481, 2018. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 514, + 287, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 514, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 53, + 514, + 287, + 579 + ], + "type": "text", + "content": "[85] Enze Xie, Peize Sun, Xiaoge Song, Wenhai Wang, Xuebo Liu, Ding Liang, Chunhua Shen, and Ping Luo. Polarmask: Single shot instance segmentation with polar representation. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12193-12202, 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 581, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 581, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 53, + 581, + 287, + 635 + ], + "type": "text", + "content": "[86] Jing Xu, Rui Zhao, Feng Zhu, Huaming Wang, and Wanli Ouyang. Attention-aware compositional network for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2119–2128, 2018. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 636, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 636, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 53, + 636, + 287, + 689 + ], + "type": "text", + "content": "[87] Shilin Xu, Xiangtai Li, Jingbo Wang, Guangliang Cheng, Yunhai Tong, and Dacheng Tao. Fashionformer: A simple, effective and unified baseline for human fashion segmentation and recognition. arXiv preprint arXiv:2204.04654, 2022.1,2,3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 691, + 287, + 713 + ], + "type": "text", + "content": "[88] Yufei Xu, Jing Zhang, Qiming Zhang, and Dacheng Tao. Vitpose: Simple vision transformer baselines for human" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "type": "text", + "content": "pose estimation. arXiv preprint arXiv:2204.12484, 2022. 
2,6,7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 312, + 96, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 96, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 312, + 96, + 545, + 140 + ], + "type": "text", + "content": "[89] Qize Yang, Ancong Wu, and Wei-Shi Zheng. Person re-identification by contour sketch under moderate clothing change. IEEE transactions on pattern analysis and machine intelligence, 43(6):2029–2046, 2019. 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 312, + 141, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 141, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 312, + 141, + 545, + 185 + ], + "type": "text", + "content": "[90] Sen Yang, Zhibin Quan, Mu Nie, and Wankou Yang. Transpose: Keypoint localization via transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11802-11812, 2021. 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 186, + 545, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 186, + 545, + 230 + ], + "spans": [ + { + "bbox": [ + 312, + 186, + 545, + 230 + ], + "type": "text", + "content": "[91] Wei Yang, Shuang Li, Wanli Ouyang, Hongsheng Li, and Xiaogang Wang. Learning feature pyramids for human pose estimation. In Proceedings of the IEEE international conference on computer vision, pages 1281-1290, 2017. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 232, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 232, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 312, + 232, + 545, + 285 + ], + "type": "text", + "content": "[92] Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, et al. Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432, 2021. 
5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 287, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 287, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 312, + 287, + 545, + 342 + ], + "type": "text", + "content": "[93] Yuhui Yuan, Rao Fu, Lang Huang, Weihong Lin, Chao Zhang, Xilin Chen, and Jingdong Wang. Hrformer: High-resolution vision transformer for dense predict. Advances in Neural Information Processing Systems, 34:7281-7293, 2021. 2, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 343, + 545, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 343, + 545, + 388 + ], + "spans": [ + { + "bbox": [ + 312, + 343, + 545, + 388 + ], + "type": "text", + "content": "[94] Xiaohua Zhai, Alexander Kolesnikov, Neil Houlsby, and Lucas Beyer. Scaling vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12104-12113, 2022. 2, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 388, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 388, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 312, + 388, + 545, + 453 + ], + "type": "text", + "content": "[95] Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18123-18133, 2022. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 456, + 545, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 456, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 312, + 456, + 545, + 510 + ], + "type": "text", + "content": "[96] Shanshan Zhang, Rodrigo Benenson, Mohamed Omran, Jan Hosang, and Bernt Schiele. How far are we from solving pedestrian detection? 
In Proceedings of the iEEE conference on computer vision and pattern recognition, pages 1259-1267, 2016. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 512, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 512, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 312, + 512, + 545, + 555 + ], + "type": "text", + "content": "[97] Shifeng Zhang, Yiliang Xie, Jun Wan, Hansheng Xia, Stan Z Li, and Guodong Guo. Widerperson: A diverse dataset for dense pedestrian detection in the wild. IEEE Transactions on Multimedia, 22(2):380-393, 2019. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 557, + 545, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 557, + 545, + 621 + ], + "spans": [ + { + "bbox": [ + 312, + 557, + 545, + 621 + ], + "type": "text", + "content": "[98] Song-Hai Zhang, Ruilong Li, Xin Dong, Paul Rosin, Zixi Cai, Xi Han, Dingcheng Yang, Haozhi Huang, and ShiMin Hu. Pose2seg: Detection free human instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 889-898, 2019. 1, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 624, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 624, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 312, + 624, + 545, + 678 + ], + "type": "text", + "content": "[99] Xiaomei Zhang, Yingying Chen, Bingke Zhu, Jinqiao Wang, and Ming Tang. Part-aware context network for human parsing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8971-8980, 2020. 
7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "text", + "content": "[100] Haiyu Zhao, Maoqing Tian, Shuyang Sun, Jing Shao, Junjie Yan, Shuai Yi, Xiaogang Wang, and Xiaou Tang. Spindle net: Person re-identification with human body region" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17851" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 408 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 72, + 72, + 288, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 72, + 288, + 106 + ], + "spans": [ + { + "bbox": [ + 72, + 72, + 288, + 106 + ], + "type": "text", + "content": "guided feature decomposition and fusion. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1077-1085, 2017. 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 288, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 288, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 288, + 150 + ], + "type": "text", + "content": "[101] Rui Zhao, Wanli Ouyang, and Xiaogang Wang. Unsupervised salience learning for person re-identification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3586–3593, 2013. 
1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 288, + 205 + ], + "type": "text", + "content": "[102] Anlin Zheng, Yuang Zhang, Xiangyu Zhang, Xiaojuan Qi, and Jian Sun. Progressive end-to-end object detection in crowded scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 857-866, 2022. 2, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 208, + 288, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 288, + 251 + ], + "type": "text", + "content": "[103] Zhedong Zheng, Liang Zheng, and Yi Yang. A discriminatively learned cnn embedding for person reidentification. ACM transactions on multimedia computing, communications, and applications (TOMM), 14(1):1-20, 2017. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 253, + 288, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 253, + 288, + 296 + ], + "spans": [ + { + "bbox": [ + 48, + 253, + 288, + 296 + ], + "type": "text", + "content": "[104] Jianqing Zhu, Shengcai Liao, Zhen Lei, and Stan Z Li. Multi-label convolutional neural network based pedestrian attribute classification. Image and Vision Computing, 58:224-229, 2017. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 297, + 288, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 297, + 288, + 341 + ], + "spans": [ + { + "bbox": [ + 48, + 297, + 288, + 341 + ], + "type": "text", + "content": "[105] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159, 2020. 
7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 342, + 288, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 288, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 288, + 408 + ], + "type": "text", + "content": "[106] Xizhou Zhu, Jinguo Zhu, Hao Li, Xiaoshi Wu, Hongsheng Li, Xiaohua Wang, and Jifeng Dai. Uni-perceiver: Pretraining unified architecture for generic perception for zero-shot and few-shot tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16804-16815, 2022. 2" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17852" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file